diff --git "a/1053.jsonl" "b/1053.jsonl" new file mode 100644--- /dev/null +++ "b/1053.jsonl" @@ -0,0 +1,489 @@ +{"seq_id": "331847681", "text": "import os \nimport nltk\nimport numpy as np\nimport nltk.tree as Tree\nfrom nltk import treetransforms, induce_pcfg, Nonterminal\n\nclass preparation():\n\n\tdef __init__(self):\n\t\ttrain_file = open(\"filename\",\"w\")\n\t\twith open(\"sequoia-corpus+fct.mrg_strict.txt\",\"r\") as file:\n\t\t\tdata = file.readlines() #split into lines\n\t\n\t\t\tfor line in data :\n\t\t\t\t#we remove first and last 2 character, we cannot do that in words because when we split we have words and no more characters\n\t\t\t\twords = line[1:-2].split() #words is the list of words for sentence line, we supprime the first and the last 2 character\n\t\t\t\tfor w in words : # we take each words\n\t\t\t\t\tif w[0] == \"(\": #If the first character is a \"(\" we know that it is not terminal so we remove \"-\" when we find one\n\t\t\t\t\t\tif \"-\" in w:\n\t\t\t\t\t\t\ttrain_file.write(w[:w.index(\"-\")]) #W.index(\"-\") gives the place where \"-\" is.\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\ttrain_file.write(w)\n\t\t\t\t\telse : #it's terminal, so we write the word even if we have \"-\" because it could be a a date for example\n\t\t\t\t\t\ttrain_file.write(w)\n\t\t\t\t\ttrain_file.write(\" \") # we need to add a spaxce between words\n\t\t\t\ttrain_file.write(\"\\n\") #we finish the sentence, we write in another line\n\n\t\ttrain_file.close()\n\n\n\n\t\t#WE SPLIT INTO TRAIN VALIDATION and TEST set\n\t\tfile1 = open(\"train_file\",\"w\")\n\t\tfile2 = open(\"validation_file\",\"w\")\n\t\tfile3 = open(\"test_file\",\"w\")\n\t\twith open(\"filename\",\"r\") as f:\n\t\t\tdata = f.readlines()\n\t\t\tlongueur = len(data)\n\t\t\tfor line in data[:int(longueur*0.8)]:\n\t\t\t\twords = line.split()\n\t\t\t\tfor w in words : \n\t\t\t\t\tfile1.write(w)\n\t\t\t\t\tfile1.write(\" \")\n\t\t\t\tfile1.write(\"\\n\")\n\n\t\t\tfor line in data[int(longueur*0.8) : int(longueur*0.9)]:\n\t\t\t\twords = line.split()\n\t\t\t\tfor w in words : \n\t\t\t\t\tfile2.write(w)\n\t\t\t\t\tfile2.write(\" \")\n\t\t\t\tfile2.write(\"\\n\")\n\n\t\t\tfor line in data[int(longueur*0.9):]:\n\t\t\t\twords = line.split()\n\t\t\t\tfor w in words : \n\t\t\t\t\tfile3.write(w)\n\t\t\t\t\tfile3.write(\" \")\n\t\t\t\tfile3.write(\"\\n\")\n\n\t\tfile1.close()\n\t\tfile2.close()\n\t\tfile3.close()\n\n\n\n\t#We create the file with just word and not the grammar\n\t\ttest_sentence = open(\"test_sentence\",\"w\")\n\t\twith open(\"test_file\",\"r\") as txt:\n\t\t\tfor phrase in txt :\n\t\t\t\ttest_tree = nltk.tree.Tree.fromstring(phrase)\n\t\t\t\tfor word in test_tree.leaves():\n\t\t\t\t\ttest_sentence.write(word)\n\t\t\t\t\ttest_sentence.write(\" \")\n\t\t\t\ttest_sentence.write(\"\\n\")\n\n\t\n\t\ttest_sentence.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "MVA_TD2_LEVY_John/system/extraction_preparation.py", "file_name": "extraction_preparation.py", "file_ext": "py", "file_size_in_byte": 2342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "nltk.tree.Tree.fromstring", "line_number": 70, "usage_type": "call"}, {"api_name": "nltk.tree", "line_number": 70, "usage_type": "attribute"}]} +{"seq_id": "83600687", "text": "from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n # http://localhost:8000/blog/1\n path('', views.blog_list, name='blog_list'),\n path('', views.blog_detail, name='blog_detail'),\n # http://localhost:8000/blog/type/1\n path('type/', views.blogs_with_type, name='blogs_with_type'),\n path('author/', views.blogs_with_author, name='blogs_with_author'),\n]", "sub_path": "myblog/mysite_env/blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "574617130", "text": "from sqlalchemy.orm import Session\nfrom server import models\nfrom server.elastic import Elastic\nelastic = Elastic()\n\n\ndef delete_post_by_id(id_delete: int):\n deleted_post = elastic.search_by_id(id_delete)\n\n if deleted_post is not None:\n elastic_deleted = elastic.delete_by_id(deleted_post[0]['_id'])\n\n return elastic_deleted\n return False\n\n\ndef get_posts(db: Session, text: str):\n id_list = [item[\"id\"] for item in elastic.search_by_text(text)]\n\n result = db.query(models.Post) \\\n .filter(models.Post.id.in_(id_list)) \\\n .order_by(models.Post.created_date.desc()) \\\n .all()\n return result\n", "sub_path": "server/db_crud.py", "file_name": "db_crud.py", "file_ext": "py", "file_size_in_byte": 642, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "server.elastic.Elastic", "line_number": 4, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 17, "usage_type": "name"}, {"api_name": "server.models.Post", "line_number": 20, "usage_type": "attribute"}, {"api_name": "server.models", "line_number": 20, "usage_type": "name"}, {"api_name": "server.models.Post.id.in_", "line_number": 21, "usage_type": "call"}, {"api_name": "server.models.Post", "line_number": 21, "usage_type": "attribute"}, {"api_name": "server.models", "line_number": 21, "usage_type": "name"}, {"api_name": "server.models.Post.created_date.desc", "line_number": 22, "usage_type": "call"}, {"api_name": "server.models.Post", "line_number": 22, "usage_type": "attribute"}, {"api_name": "server.models", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "139295314", "text": "from common.lib.servers.Pulser2.pulse_sequences.pulse_sequence import pulse_sequence\nfrom barium.lib.scripts.pulse_sequences.sub_sequences.DopplerCooling133 import doppler_cooling_133 as doppler_cooling_133\n\"\"\"\n6/17/17\nKeeping the same format as optical pumping, but here the TTL is not auto inverted so\nwe use ttl high to turn on, and off time is just empty space at the end if needed.\n\"\"\"\n\nclass state_detection_133(pulse_sequence):\n\n required_parameters = [\n ('StateDetection133', 'state_detection_duration'),\n ('StateDetection133', 'TTL_493'),\n ('StateDetection133', 'TTL_650')\n ]\n\n #required_parameters.extend(doppler_cooling_133.all_required_parameters())\n\n def sequence(self):\n # start time is defined to be 0s.\n p = self.parameters.StateDetection133\n\n\n self.ttl_493 = p.TTL_493\n self.ttl_650 = p.TTL_650\n\n self.addTTL('TimeResolvedCount', self.start, p.state_detection_duration)\n self.addTTL(self.ttl_493, self.start, 
p.state_detection_duration)\n        self.addTTL(self.ttl_650, self.start, p.state_detection_duration)\n        self.end = self.start + p.state_detection_duration\n\n", "sub_path": "lib/scripts/pulse_sequences/sub_sequences/StateDetection133.py", "file_name": "StateDetection133.py", "file_ext": "py", "file_size_in_byte": 1243, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "common.lib.servers.Pulser2.pulse_sequences.pulse_sequence.pulse_sequence", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "295922766", "text": "'''\n@author:sjie\n'''\nimport pymysql\nimport re\nimport urllib.request\n#connect to the database\ndef get_newprice(markercode):\n    url='''http://market43.gdiex.com/market/status.do?market=GDIEX&contract={markercode}'''.format(markercode=markercode)\n    response = urllib.request.urlopen(url)\n    data = response.read()\n    data = data.decode('utf-8')\n    #print(data)\n    data_split = re.split(',',data)[6].split(':')\n    data_dict = {}\n    data_dict['status'] = data_split[1]\n    for status,value in data_dict.items():\n        return (value)\n\ndef connect_mysql():\n    conn = pymysql.connect(host='10.0.1.61',user='shengjie',passwd='520xiaowen',db='fxeasy')\n    try:\n        with conn.cursor() as cursor:\n            sql = \"SELECT count(1) FROM et_storages WHERE buying_date >= '2016-12-07 08:00:00'\"\n            cursor.execute(sql)\n            result=cursor.fetchone()\n            print(str(result)[1:-2])\n    except Exception as e:\n        print(e)\n    finally:\n        conn.close()\n\nconnect_mysql()", "sub_path": "python/php/python/Mysql.py", "file_name": "Mysql.py", "file_ext": "py", "file_size_in_byte": 955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "re.split", "line_number": 14, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "60479019", "text": "from numpy import isnan, transpose\nfrom scipy.io import loadmat\n\ntry:\n    from h5py import File\nexcept ImportError:\n    File = None\n\n\ndef import_electrodes(mat_file, n_chan):\n\n    try:\n        mat_all = loadmat(mat_file)\n        for varname, mat in mat_all.items():\n            if varname.startswith('__'):\n                continue\n            elec = _find_electrodes(mat, n_chan)\n            if elec is not None:\n                return elec\n\n    except NotImplementedError:\n        if File is None:\n            raise ImportError('You need to install h5py to open this file')\n\n        with File(mat_file, 'r') as f:\n            for varname in f:\n                mat = transpose(f[varname][()])\n                elec = _find_electrodes(mat, n_chan)\n                if elec is not None:\n                    return elec\n\n    return None\n\n\ndef _find_electrodes(mat, n_chan):\n    print(f'Number of electrodes in mat file: {mat.shape[0]}')\n    if mat.shape[0] == n_chan:\n        return mat\n\n    has_nan = isnan(mat).all(axis=1)\n    mat = mat[~has_nan, :3]\n\n    print(f'Number of electrodes in mat file without nan: {mat.shape[0]}')\n    if mat.shape[0] == n_chan:\n        return mat\n\n    return None\n", "sub_path": "xelo2/io/electrodes.py", "file_name": "electrodes.py", "file_ext": "py", "file_size_in_byte": 1192, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "h5py.File", "line_number": 7, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 13, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 22, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "500721502", 
"text": "from front_machine.config.parser import get_config_from_json\nimport pandas as pd\nimport argparse\nimport time\nimport zmq\n\ndef result_collector(address, outputPath, numTerminate, is_test=False):\n \"\"\"\n takes controur values of an image and save them in a text file.\n Args:\n address : string of the ip address followed by the port to make the connection with contours_node.\n outputPath: string path to the output text.\n numTerminate: number of terminates to be sent\n \"\"\"\n #make the connections\n context = zmq.Context()\n results_receiver = context.socket(zmq.PULL)\n results_receiver.bind(address)\n\n #create an output dictionary\n out_dict = {\"Frame Number\": [], \"Contours\": []}\n counter = 0\n TerminationCount = 0\n\n #receive the contours and save them in a txt file\n while True:\n if TerminationCount == numTerminate:\n break\n\n work = results_receiver.recv_pyobj()\n data = work['contours']\n\n if len(data) == 0:\n TerminationCount += 1\n continue\n\n #add the results to output dictionary\n out_dict[\"Frame Number\"].append(\"Frame #{}\".format(counter))\n out_dict[\"Contours\"].append(data)\n counter += 1\n\n #create a dataframe and write outputs\n out_df = pd.DataFrame(out_dict, columns=[\"Frame Number\", \"Contours\"])\n out_df.to_csv(outputPath)\n\n # return if the caller is a test\n if is_test:\n return\n\n # wait for the other processes to finish \n # time.sleep(10) \n\ndef main():\n \"\"\"Main driver of output node\"\"\"\n argparser = argparse.ArgumentParser(description=__doc__)\n argparser.add_argument('-t', '--text_path', type=str, help='path to the output text')\n argparser.add_argument('-n', '--total_num', type=int, help='total number of consumer nodes')\n \n args = argparser.parse_args()\n\n config = get_config_from_json(\"front_machine/config/server.json\") # get other nodes addresses from json config\n\n result_collector(config.output_socket, args.text_path, args.total_num) # call the output collector process\n\nif __name__=='__main__':\n main()", "sub_path": "front_machine/output_node.py", "file_name": "output_node.py", "file_ext": "py", "file_size_in_byte": 2172, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "zmq.Context", "line_number": 16, "usage_type": "call"}, {"api_name": "zmq.PULL", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 55, "usage_type": "call"}, {"api_name": "front_machine.config.parser.get_config_from_json", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "328071653", "text": "from decimal import Decimal\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom btcrpc.utils import constantutil\nfrom btcrpc.utils.config_file_reader import ConfigFileReader\nfrom btcrpc.vo.wallet_balance import (\n GetWalletBalancePostParameterSerializer,\n WalletsBalanceResponseSerializer)\nfrom btcrpc.utils.rpc_calls.rpc_instance_generator import RpcGenerator\nfrom btcrpc.utils.chain_enum import ChainEnum\nimport logging\n\nlog = logging.getLogger(__name__)\nyml_config = ConfigFileReader()\n\n\nclass CheckWalletsBalance(APIView):\n\n def post(self, request):\n post_serializers = GetWalletBalancePostParameterSerializer(\n data=request.data)\n post_serializers.is_valid(raise_exception=True)\n\n chain = ChainEnum.UNKNOWN\n wallet_balance_response_list = []\n currency = post_serializers.data[\"currency\"]\n 
wallet_list = yml_config.get_wallet_list(currency)\n log.info(wallet_list)\n\n for wallet_json in wallet_list:\n wallet = wallet_json[\"wallet_name\"]\n wallet_type = wallet_json[\"wallet_type\"]\n\n log.info(wallet)\n rpc_call = RpcGenerator.get_rpc_instance(wallet=wallet,\n currency=currency)\n chain = constantutil.check_service_chain(rpc_call)\n log.info(chain)\n balance = rpc_call.get_wallet_balance()\n log.info(format(balance, \"0.8f\"))\n wallet_balance_response = {\n \"wallet\": wallet,\n \"wallet_type\": wallet_type,\n \"balance\": Decimal(balance),\n \"chain\": chain.value,\n \"error\": 0,\n \"error_message\": \"\"}\n\n log.info(wallet_balance_response)\n wallet_balance_response_list.append(wallet_balance_response)\n\n s = WalletsBalanceResponseSerializer(\n data={\"wallets\": wallet_balance_response_list})\n s.is_valid(raise_exception=True)\n return Response(s.data)\n", "sub_path": "btcxblockchainapi/btcrpc/view/check_wallets_balance.py", "file_name": "check_wallets_balance.py", "file_ext": "py", "file_size_in_byte": 2043, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "btcrpc.utils.config_file_reader.ConfigFileReader", "line_number": 14, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 17, "usage_type": "name"}, {"api_name": "btcrpc.vo.wallet_balance.GetWalletBalancePostParameterSerializer", "line_number": 20, "usage_type": "call"}, {"api_name": "btcrpc.utils.chain_enum.ChainEnum.UNKNOWN", "line_number": 24, "usage_type": "attribute"}, {"api_name": "btcrpc.utils.chain_enum.ChainEnum", "line_number": 24, "usage_type": "name"}, {"api_name": "btcrpc.utils.rpc_calls.rpc_instance_generator.RpcGenerator.get_rpc_instance", "line_number": 35, "usage_type": "call"}, {"api_name": "btcrpc.utils.rpc_calls.rpc_instance_generator.RpcGenerator", "line_number": 35, "usage_type": "name"}, {"api_name": "btcrpc.utils.constantutil.check_service_chain", "line_number": 37, "usage_type": "call"}, {"api_name": "btcrpc.utils.constantutil", "line_number": 37, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 44, "usage_type": "call"}, {"api_name": "btcrpc.vo.wallet_balance.WalletsBalanceResponseSerializer", "line_number": 52, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "615618276", "text": "from flask import render_template, session, redirect, url_for, current_app, \\\n flash\nfrom flask import request\nfrom flask_login import login_required\nfrom . import main\nfrom .forms import PostForm\nfrom ..models import Post\nfrom .. 
import db\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n page = request.args.get('page', 1, type=int)\n pagination = Post.query.order_by(Post.timestamp.desc()).paginate(\n page, per_page=current_app.config['POSTS_PER_PAGE'],\n error_out=True)\n posts = pagination.items\n return render_template('index.html', posts=posts,\n pagination=pagination)\n\n\n@main.route('/about', methods=['GET', 'POST'])\ndef about():\n return render_template('about.html')\n\n\n@main.route('/add-post', methods=['GET', 'POST'])\n@login_required\ndef add_post():\n form = PostForm()\n if form.validate_on_submit():\n post = Post(title=form.title.data, body=form.body.data)\n db.session.add(post)\n return redirect(url_for('main.index'))\n return render_template('add_post.html', form=form)\n\n\n@main.route('/post/')\ndef view_post(id):\n post = Post.query.get_or_404(id)\n return render_template('post.html', posts=[post])\n\n\n@main.route('/edit/', methods=['GET', 'POST'])\n@login_required\ndef edit_post(id):\n post = Post.query.get_or_404(id)\n form = PostForm()\n if form.validate_on_submit():\n post.title = form.title.data\n post.body = form.body.data\n db.session.add(post)\n flash('This post has been updated.')\n return redirect(url_for('main.view_post', id=post.id))\n form.title.data = post.title\n form.body.data = post.body\n return render_template('edit_post.html', form=form)\n", "sub_path": "app/main/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1740, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.request.args.get", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "models.Post.query.order_by", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Post.query", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 14, "usage_type": "name"}, {"api_name": "models.Post.timestamp.desc", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Post.timestamp", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.current_app.config", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 35, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 28, "usage_type": "name"}, {"api_name": "models.Post.query.get_or_404", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Post.query", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Post.query.get_or_404", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Post.query", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 47, 
"usage_type": "name"}, {"api_name": "forms.PostForm", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 57, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "651877325", "text": "from django.views.generic import FormView\nfrom google.appengine.api import users\nfrom guestbook.api import JsonResponse\n\n\nclass Auth(JsonResponse.JSONResponseMixin, FormView):\n\n\tdef get(self, request, *args, **kwargs):\n\t\tif users.get_current_user():\n\t\t\turl = users.create_logout_url('/')\n\t\t\turl_linktext = 'Logout'\n\t\t\tuser_email = users.get_current_user().email()\n\t\telse:\n\t\t\turl = users.create_login_url('/')\n\t\t\turl_linktext = 'Login'\n\t\t\tuser_email = ''\n\n\t\tcontext = {\n\t\t\t'url': url,\n\t\t\t'url_linktext': url_linktext,\n\t\t\t'user_email': user_email\n\t\t}\n\t\treturn self.render_to_response(context)\n", "sub_path": "guestbook/api/api_view.py", "file_name": "api_view.py", "file_ext": "py", "file_size_in_byte": 591, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "guestbook.api.JsonResponse.JSONResponseMixin", "line_number": 6, "usage_type": "attribute"}, {"api_name": "guestbook.api.JsonResponse", "line_number": 6, "usage_type": "name"}, {"api_name": "django.views.generic.FormView", "line_number": 6, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 9, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 9, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 10, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 10, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 12, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 12, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_login_url", "line_number": 14, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "331567437", "text": "# -*- coding: utf-8 -*-\n##############################################################################\n# Copyright (c) 2015-Present Webkul Software Pvt. Ltd. 
()\n# See LICENSE file for full copyright and licensing details.\n# License URL : \n##############################################################################\n\nimport logging\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import Warning\n\n_logger = logging.getLogger(__name__)\n\n\nclass StockWizardMessage(models.TransientModel):\n    _name = \"stock.inventory.wizard\"\n    _description = \"Stock Inventory Wizard\"\n\n    text = fields.Text(string='Message')\n\n    def generated_message(self, message, name='Message/Summary'):\n        partial_id = self.create({'text': message}).id\n        return {\n            'name': name,\n            'view_mode': 'form',\n            'view_id': False,\n            'res_model': 'stock.inventory.wizard',\n            'res_id': partial_id,\n            'type': 'ir.actions.act_window',\n            'nodestroy': True,\n            'target': 'new',\n            'domain': '[]',\n            'context': self._context\n        }\n\n    def view_inventory(self):\n        return {\n            'name': 'Inventory',\n            'view_mode': 'form',\n            'view_id': False,\n            'res_model': 'stock.inventory',\n            'res_id': self._context.get('inventory_id'),\n            'type': 'ir.actions.act_window',\n            'nodestroy': True,\n            # 'target': 'new',\n            'domain': '[]',\n        }\n", "sub_path": "webkul_addons/advance_inventory_import/wizards/wizard_message.py", "file_name": "wizard_message.py", "file_ext": "py", "file_size_in_byte": 1568, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "odoo.models.TransientModel", "line_number": 16, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 16, "usage_type": "name"}, {"api_name": "odoo.fields.Text", "line_number": 20, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "316028676", "text": "import os\nimport json\nfrom pathlib import Path\n\n\nclass JobManager:\n\n    @staticmethod\n    def parse_json(data, keys):\n        for key in keys:\n            if key in data:\n                data[key] = json.loads(data[key])\n        return data\n\n    @staticmethod\n    def get_job(job_id):\n        file_path = Path(os.path.join(os.path.dirname(__file__), '..', 'jobs', '{}.json'.format(job_id)))\n        if not file_path.exists():\n            if not Path(os.path.join(os.path.dirname(__file__),\n                                     '..',\n                                     'task_records',\n                                     '{}.tmp'.format(job_id)\n                                     )).exists():\n                return {\n                    'data': None,\n                    'errorCode': 1001,\n                    'message': 'invalid jobId',\n                }\n            else:\n                return None\n        with open(file_path, 'r') as f:\n            try:\n                data = json.load(f)\n                if data['data'] is not None:\n                    data['data'] = JobManager.parse_json(data['data'], ['poiStartName', 'poiEndName'])\n            except BaseException as error:\n                return {\n                    'data': None,\n                    'errorCode': 1001,\n                    'message': 'abnormal data generated',\n                }\n        return {\n            'data': data.get('data', None),\n            'errorCode': data.get('errorCode', None),\n            'message': data.get('message', None),\n        }\n", "sub_path": "util/job_manager.py", "file_name": "job_manager.py", "file_ext": "py", "file_size_in_byte": 1553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "json.loads", "line_number": 12, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 19, "usage_type": "call"}, {"api_name": "json.load", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "160710867", "text": "import joblib\nimport pandas as pd\nimport sys\nfrom sklearn import metrics\n\npd.set_option('display.max_columns', None)\n\nname = sys.argv[1]\nDATA = pd.read_csv('./csvFinal2.csv')\nsample = DATA[DATA['Name']== name]\n\nsample_X = sample.drop(['Label', 'Frame' ,'Name'], axis=1)\n\nsample_y = sample['Label']\n\nclassifier = joblib.load('./trainedModels/KNeighborsClassifier(n_jobs=-1, n_neighbors=1).joblib')\npreds=classifier.predict(sample_X)\n\n\ndf = pd.DataFrame(preds, columns=[\"Frame predicition\"])\nprint(df.value_counts())\nprint(df)\n\nacc = metrics.accuracy_score(sample_y, preds)\nprint(\"Accuracy: \", acc)\nprint(\"real\", sample_y.iloc[0])", "sub_path": "SquatClassifier.py", "file_name": "SquatClassifier.py", "file_ext": "py", "file_size_in_byte": 628, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.set_option", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "67505673", "text": "#!/usr/bin/env python3\n# coding:utf-8\n\nimport sys\n\nimport requests\n\nfrom .style import use_style\n\n\ndef translate():\n if len(sys.argv) >= 2:\n word = sys.argv[1]\n else:\n print(use_style('内容不能为空.', 'red'))\n return\n\n url = 'http://fy.iciba.com/ajax.php?a=fy'\n headers = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Host': 'fy.iciba.com',\n 'Origin': 'http://fy.iciba.com',\n 'Referer': 'http://fy.iciba.com/',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest',\n }\n forms = {\n 'f': 'auto',\n 't': 'auto',\n 'w': word,\n }\n r = requests.post(url, headers=headers, data=forms)\n jd = r.json()\n content = jd['content']\n if 'word_mean' in content:\n print(use_style('\\n'.join(content['word_mean']), 'yellow'))\n else:\n print(use_style(content['out'], 'yellow'))\n", "sub_path": "fy/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 1234, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "style.use_style", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 36, "usage_type": "call"}, {"api_name": "style.use_style", "line_number": 40, "usage_type": "call"}, {"api_name": "style.use_style", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "528528622", "text": "import maya.api.OpenMaya as om2\n\n\ndef 
createDAGNode(dagNodeType, nodeName=\"newDAGNode\"):\n    \"\"\"\n    Create a new DAG node of the given type with the given name\n    :param dagNodeType: str - node type\n    :param nodeName: str\n    :return: MObject - new node MObject\n    \"\"\"\n    dagMod = om2.MDagModifier()\n    newDAGNode = dagMod.createNode(dagNodeType)\n    dagMod.renameNode(newDAGNode, nodeName + \"_\" + dagNodeType)\n    dagMod.doIt()\n    return newDAGNode\n\n\ndef createDGNode(dgNodeType, nodeName=\"newDGNode\"):\n    \"\"\"\n    Create a new DG node of the given type with the given name\n    :param dgNodeType: str - node type\n    :param nodeName: str\n    :return: MObject - new node MObject\n    \"\"\"\n    dgMod = om2.MDGModifier()\n    newDGNode = dgMod.createNode(dgNodeType)\n    dgMod.renameNode(newDGNode, nodeName + \"_\" + dgNodeType)\n    dgMod.doIt()\n    return newDGNode\n", "sub_path": "creation_utils/creation_utils.py", "file_name": "creation_utils.py", "file_ext": "py", "file_size_in_byte": 844, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "maya.api.OpenMaya.MDagModifier", "line_number": 11, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya", "line_number": 11, "usage_type": "name"}, {"api_name": "maya.api.OpenMaya.MDGModifier", "line_number": 25, "usage_type": "call"}, {"api_name": "maya.api.OpenMaya", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "99445412", "text": "import numpy as np\nfrom Bio import SeqIO\nfrom Bio.Align import substitution_matrices\nfrom itertools import product\n\nMAT = substitution_matrices.load(\"BLOSUM62\")\nMIN_INT = np.iinfo(np.int32).min\nGAPOPEN = 11\nGAPEXTEND = 1\n# test datasets are too big, may not compute in time\n# on some of them\ndef laff(s1: str, s2:str) -> tuple:\n    m = len(s1)\n    n = len(s2)\n    max_score, max_i, max_j = MIN_INT, 0, 0\n    upper = np.zeros(shape=(m+1, n+1), dtype=int)\n    middle = np.zeros(shape=(m+1, n+1), dtype=int)\n    lower = np.zeros(shape=(m+1, n+1), dtype=int)\n    pntrs = np.zeros(shape=(m+1, n+1), dtype=int)\n    # fill tables\n    for i, j in product(range(1, m+1), range(1, n+1)):\n        upper[i, j] = max(\n            upper[i, j-1] - GAPEXTEND,\n            middle[i, j-1] - GAPOPEN\n        )\n        lower[i, j] = max(\n            lower[i-1, j] - GAPEXTEND,\n            middle[i-1, j] - GAPOPEN\n        )\n        scores = [\n            lower[i, j],\n            middle[i-1, j-1] + MAT[s1[i-1], s2[j-1]],\n            upper[i, j],\n            0 # index will be 3 -> backtrack stop\n        ]\n        middle[i, j] = max(scores)\n        pntrs[i, j] = scores.index(middle[i, j])\n        if (middle[i, j] > max_score):\n            max_score = middle[i, j]\n            max_i, max_j = i, j\n    # backtrack\n    i, j = max_i, max_j\n    s1_al, s2_al = s1[:i], s2[:j]\n    while (i > 0) and (j > 0):\n        if (pntrs[i, j] == 0):\n            i -= 1\n        elif (pntrs[i, j] == 1):\n            i -= 1\n            j -= 1\n        elif (pntrs[i, j] == 2):\n            j -= 1\n        else: # pntrs[i, j] == 3\n            break\n    s1_al, s2_al = s1_al[i:], s2_al[j:]\n    return max_score, s1_al, s2_al\n\ndef main():\n    seq1, seq2 = (item.seq for item in SeqIO.parse(\"rosalind_laff.txt\", \"fasta\"))\n    \n    with open(\"out.txt\", \"w\") as o:\n        print(*laff(seq1, seq2), sep='\\n', file=o) \n\nif __name__ == \"__main__\":\n    main()", "sub_path": "Bioinformatics Stronghold/79_laff.py", "file_name": "79_laff.py", "file_ext": "py", "file_size_in_byte": 1912, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "Bio.Align.substitution_matrices.load", "line_number": 6, "usage_type": "call"}, {"api_name": "Bio.Align.substitution_matrices", "line_number": 6, "usage_type": "name"}, {"api_name": "numpy.iinfo", "line_number": 7, "usage_type": "call"}, {"api_name": 
"numpy.int32", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 21, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 58, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "162781757", "text": "# Copyright (c) 2016-2022 Association of Universities for Research in Astronomy, Inc. (AURA)\n# For license information see LICENSE or https://opensource.org/licenses/BSD-3-Clause\n\nimport asyncio\nimport signal\nimport functools\nfrom datetime import datetime\nfrom multiprocessing import Process\nfrom random import randint\n\nfrom scheduler.core.meta import Singleton\nfrom scheduler.core.service.service import Service\nfrom scheduler.core.service.modes import SchedulerModes\nfrom scheduler.config import config\n\nfrom .runner import StandardRunner\nfrom .task import SchedulerTask, TaskType\n\nDEFAULT_TIMEOUT = 10 # seconds\nDEFAULT_SIZE = 5 # number of tasks to run in parallel\n\n\nclass ProcessManager(metaclass=Singleton):\n \"\"\"\n Main handler for each runner, which is responsible for scheduling the task.\n \"\"\"\n\n def __init__(self, size: int = DEFAULT_SIZE, timeout: int = DEFAULT_TIMEOUT):\n self.realtime_runner = StandardRunner(1)\n self.standard_runner = StandardRunner(size)\n self.timeout = timeout\n\n def schedule_with_runner(self, task: SchedulerTask, mode: TaskType):\n \"\"\"\n Schedule a task with the corresponding runner for the given mode.\n \"\"\"\n if mode == TaskType.REALTIME:\n return self.realtime_runner.schedule(Process(target=task.target), task.timeout)\n elif mode == TaskType.STANDARD:\n return self.standard_runner.schedule(Process(target=task.target), task.timeout)\n else:\n raise ValueError(f'Invalid mode {mode}')\n\n def add_task(self, start: datetime, target: callable, mode: TaskType) -> None:\n task = SchedulerTask(start,\n target,\n self.timeout)\n self.schedule_with_runner(task, mode)\n\n async def run(self, scheduler: Service, period: int, mode: TaskType):\n done = asyncio.Event()\n\n def shutdown():\n done.set()\n self.shutdown()\n asyncio.get_event_loop().stop()\n\n asyncio.get_event_loop().add_signal_handler(signal.SIGINT, shutdown)\n\n while not done.is_set():\n self.add_task(datetime.now(), scheduler, mode)\n if period == 0:\n # random case #\n await asyncio.sleep(randint(1, 10))\n else:\n await asyncio.sleep(period)\n\n def shutdown(self):\n \"\"\"\n Callback for shutting down the process manager.\n \"\"\"\n self.realtime_runner.terminate_all()\n self.standard_runner.terminate_all()\n\n\ndef setup_with(mode: SchedulerModes):\n # Setup scheduler mode\n try:\n mode = SchedulerModes[config.mode.upper()]\n except KeyError:\n raise ValueError('Mode is Invalid!')\n\n def decorator_setup(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n pm = func(*args, **kwargs)\n if mode is SchedulerModes.OPERATION:\n pm.size = 1\n else:\n pm.size = config.process_manager.size\n return pm\n return wrapper\n return decorator_setup\n\n\n@setup_with(config.mode)\ndef setup_manager():\n \"\"\"Setup the manager based on the mode using setup_with decorator.\n\n Default values:\n TIMEOUT = 10 seconds\n SIZE = 5 task at the same time (Not valid for Operation).\n\n Returns:\n 
ProcessManager: Default Process Manager if timeout is not set.\n \"\"\"\n if config.process_manager.timeout:\n return ProcessManager(timeout=config.process_manager.timeout)\n return ProcessManager()\n", "sub_path": "scheduler/process_manager/manager.py", "file_name": "manager.py", "file_ext": "py", "file_size_in_byte": 3559, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "scheduler.core.meta.Singleton", "line_number": 23, "usage_type": "name"}, {"api_name": "runner.StandardRunner", "line_number": 29, "usage_type": "call"}, {"api_name": "runner.StandardRunner", "line_number": 30, "usage_type": "call"}, {"api_name": "task.SchedulerTask", "line_number": 33, "usage_type": "name"}, {"api_name": "task.TaskType", "line_number": 33, "usage_type": "name"}, {"api_name": "task.TaskType.REALTIME", "line_number": 37, "usage_type": "attribute"}, {"api_name": "task.TaskType", "line_number": 37, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 38, "usage_type": "call"}, {"api_name": "task.target", "line_number": 38, "usage_type": "attribute"}, {"api_name": "task.timeout", "line_number": 38, "usage_type": "attribute"}, {"api_name": "task.TaskType.STANDARD", "line_number": 39, "usage_type": "attribute"}, {"api_name": "task.TaskType", "line_number": 39, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 40, "usage_type": "call"}, {"api_name": "task.target", "line_number": 40, "usage_type": "attribute"}, {"api_name": "task.timeout", "line_number": 40, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}, {"api_name": "task.TaskType", "line_number": 44, "usage_type": "name"}, {"api_name": "task.SchedulerTask", "line_number": 45, "usage_type": "call"}, {"api_name": "scheduler.core.service.service.Service", "line_number": 50, "usage_type": "name"}, {"api_name": "task.TaskType", "line_number": 50, "usage_type": "name"}, {"api_name": "asyncio.Event", "line_number": 51, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 56, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 58, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 58, "usage_type": "attribute"}, {"api_name": "scheduler.core.meta", "line_number": 61, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 64, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 66, "usage_type": "call"}, {"api_name": "scheduler.core.service.modes.SchedulerModes", "line_number": 76, "usage_type": "name"}, {"api_name": "scheduler.core.service.modes.SchedulerModes", "line_number": 79, "usage_type": "name"}, {"api_name": "scheduler.config.config.mode.upper", "line_number": 79, "usage_type": "call"}, {"api_name": "scheduler.config.config.mode", "line_number": 79, "usage_type": "attribute"}, {"api_name": "scheduler.config.config", "line_number": 79, "usage_type": "name"}, {"api_name": "scheduler.core.service.modes.SchedulerModes.OPERATION", "line_number": 87, "usage_type": "attribute"}, {"api_name": "scheduler.core.service.modes.SchedulerModes", "line_number": 87, "usage_type": "name"}, {"api_name": "scheduler.config.config.process_manager", "line_number": 90, "usage_type": 
"attribute"}, {"api_name": "scheduler.config.config", "line_number": 90, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 84, "usage_type": "call"}, {"api_name": "scheduler.config.config.process_manager", "line_number": 107, "usage_type": "attribute"}, {"api_name": "scheduler.config.config", "line_number": 107, "usage_type": "name"}, {"api_name": "scheduler.config.config.process_manager", "line_number": 108, "usage_type": "attribute"}, {"api_name": "scheduler.config.config", "line_number": 108, "usage_type": "name"}, {"api_name": "scheduler.config.config.mode", "line_number": 96, "usage_type": "attribute"}, {"api_name": "scheduler.config.config", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "116823625", "text": "import csv\r\nimport io\r\nfrom prestapyt import PrestaShopWebServiceDict\r\n\r\n\r\n# pip install --ignore-installed git+https://github.com/prestapyt/prestapyt.git@master\r\n\r\n\r\n# Add category to prestashop\r\ndef create_category(prestashop, blank_category, names, links, deep):\r\n for i in range(0, len(names)):\r\n blank_category.update({'category': {\r\n 'id_parent': deep,\r\n 'active': '1',\r\n 'name': {\r\n 'language': [{'attrs': {'id': '2'}, 'value': names[i]}]},\r\n 'link_rewrite': {\r\n 'language': [{'attrs': {'id': '2'}, 'value': links[i]}]}\r\n }})\r\n prestashop.add('categories', blank_category)\r\n\r\n\r\n# Add category tree to prestashop\r\ndef create_category_tree(prestashop):\r\n main_category_name = [\"Filmy\"]\r\n categories_name = [\"DVD\", \"Blu-Ray\"]\r\n subcategories_name = [\"Animowane/Familijne\", \"Dokumentalne\", \"Dramat\", \"Fantasy/Sci-Fi\", \"Horror/Thriller\",\r\n \"Komedia/Komedia Romantyczna\", \"Muzyczne/Musicale\", \"Sensacyjne/Przygodowe\"]\r\n\r\n main_category_link = [\"filmy\"]\r\n categories_link = [\"filmy-dvd\", \"filmy-blu-ray\"]\r\n subcategories_link = [\"animowanefamilijne\", \"dokumentalne\", \"dramat\", \"fantasysci-fi\", \"horrorthriller\",\r\n \"komediakomedia-romantyczna\", \"muzycznemusicale\", \"sensacyjneprzygodowe\"]\r\n\r\n blank_category = prestashop.get('categories', options={'schema': 'blank'})\r\n\r\n print(blank_category)\r\n\r\n # Film\r\n create_category(prestashop, blank_category, main_category_name, main_category_link, 2)\r\n # Categories\r\n create_category(prestashop, blank_category, categories_name, categories_link, 1000)\r\n # Subcategories\r\n create_category(prestashop, blank_category, subcategories_name, subcategories_link, 1001)\r\n create_category(prestashop, blank_category, subcategories_name, subcategories_link, 1002)\r\n\r\n\r\n# Add image to product\r\ndef add_image(prestashop, image_id, product_id):\r\n file_name = 'images/' + str(image_id) + '.jpeg'\r\n fd = io.open(file_name, \"rb\")\r\n content = fd.read()\r\n fd.close()\r\n prestashop.add('/images/products/' + str(product_id), files=[('image', file_name, content)])\r\n\r\n\r\n# Add attributes and quantities to product\r\ndef add_combinations(prestashop, id_product):\r\n\r\n # Attributes\r\n # PL\r\n blank_combination = prestashop.get('combinations', options={'schema': 'blank'})\r\n blank_combination.update({'combination': {\r\n 'id_product': str(id_product),\r\n 'minimal_quantity': '1',\r\n 'associations': {'product_option_values': {'product_option_value': {'id': '1'}}}}}\r\n )\r\n prestashop.add('combinations', blank_combination)\r\n # EN\r\n blank_combination.update({'combination': {\r\n 'id_product': str(id_product),\r\n 'minimal_quantity': '1',\r\n 'associations': {'product_option_values': 
{'product_option_value': {'id': '2'}}}}}\r\n )\r\n prestashop.add('combinations', blank_combination)\r\n\r\n # Quantities\r\n blank_stock_available = prestashop.get('stock_availables', id_product * 3 - 1)\r\n blank_stock_available['stock_available']['quantity'] = 50\r\n prestashop.edit('stock_availables', blank_stock_available)\r\n\r\n blank_stock_available = prestashop.get('stock_availables', id_product * 3)\r\n blank_stock_available['stock_available']['quantity'] = 50\r\n prestashop.edit('stock_availables', blank_stock_available)\r\n\r\n\r\n# Add products to prestashop\r\ndef add_products(prestashop):\r\n\r\n dict = {}\r\n product_features_dict = {}\r\n features, names = get_features()\r\n\r\n with open('products.csv', encoding=\"utf8\") as csvfile:\r\n products = list(csv.reader(csvfile, delimiter=\";\"))\r\n blank_product = prestashop.get('products', options={'schema': 'blank'})\r\n\r\n for i in range(0, 568):\r\n print(i)\r\n\r\n # Categories\r\n categories = []\r\n dict[\"id\"] = products[i][3].split(\"|\")[0]\r\n categories.append(dict.copy())\r\n for j in range(1, len(products[i][3].split(\"|\"))):\r\n dict[\"id\"] = products[i][3].split(\"|\")[j]\r\n categories.append(dict.copy())\r\n id_category_default = [x['id'] for x in categories]\r\n\r\n # Features\r\n product_features = []\r\n for product_feature in products[i][6].split('|'):\r\n for k, feature_name in enumerate(names):\r\n if feature_name == product_feature.split('@')[0]:\r\n product_features_dict[\"id\"] = k + 1\r\n break\r\n for k, feature in enumerate(features):\r\n if feature == product_feature:\r\n product_features_dict[\"id_feature_value\"] = k + 1\r\n break\r\n product_features.append(product_features_dict.copy())\r\n\r\n # Product\r\n blank_product.update({'product': {\r\n 'id_manufacturer': '0',\r\n 'id_default_combination': '1',\r\n 'id_category_default': max(id_category_default),\r\n 'id_tax_rules_group': '1',\r\n 'reference': '1438245'+str(i),\r\n 'supplier_reference': '982473182',\r\n 'state': '1',\r\n 'on_sale': '0',\r\n 'price': str(round(float(products[i][4]), 3)),\r\n 'wholesale_price': products[i][5],\r\n 'customizable': '1',\r\n 'active': '1',\r\n 'show_condition': '1',\r\n 'condition': 'new',\r\n 'show_price': '1',\r\n 'visibility': 'both',\r\n 'available_for_order': '1',\r\n 'link_rewrite': {'language': {'attrs': {'id': '2'}, 'value': products[i][2]}},\r\n 'name': {'language': [{'attrs': {'id': '1'}, 'value': products[i][1]},\r\n {'attrs': {'id': '2'}, 'value': products[i][1]}]},\r\n 'description': {'language': {'attrs': {'id': '2'}, 'value':products[i][7]}},\r\n 'description_short': {'language': {'attrs': {'id': '2'}, 'value': 'Film'}},\r\n 'available_now': {'language': {'attrs': {'id': '2'}, 'value': 'Produkt dostępny'}},\r\n 'available_later': {'language': {'attrs': {'id': '2'}, 'value': 'Zamówienie dozwolone'}},\r\n 'associations': {\r\n 'categories': {'attrs': {'nodeType': 'category', 'api': 'categories'}, 'category': categories},\r\n #'combinations': {'attrs': {'nodeType': 'combination', 'api': 'combinations'},'combination': [{'id': '1'}, {'id': '2'}]},\r\n #'product_option_values': {'attrs': {'nodeType': 'product_option_value', 'api': 'product_option_values'},'product_option_value': [{'id': '1'}, {'id': '2'}]},\r\n 'product_features': {'attrs': {'nodeType': 'product_feature', 'api': 'product_features'},'product_feature': product_features},\r\n #'stock_availables': {'attrs': {'nodeType': 'stock_available', 'api': 'stock_availables'},'stock_available': [{'id': '869', 'id_product_attribute': 
'0'},{'id': '1125', 'id_product_attribute': '1'},{'id': '1126', 'id_product_attribute': '2'}]},\r\n }}}\r\n\r\n )\r\n prestashop.add('products', blank_product)\r\n add_image(prestashop, products[i][0], i)\r\n add_combinations(prestashop, i)\r\n\r\n\r\n# Add features to prestashop\r\ndef add_features(names, prestashop):\r\n blank = prestashop.get('product_features', options={'schema': 'blank'})\r\n for name in names:\r\n blank.update({'product_feature': {\r\n 'name': {'language': {'attrs': {'id': '2'}, 'value': name}}}\r\n })\r\n prestashop.add('product_features', blank)\r\n return names\r\n\r\n\r\n# Add feature values to prestashop\r\ndef add_values(prestashop, features, names):\r\n id_feature = 1\r\n blank = prestashop.get('product_feature_values', options={'schema': 'blank'})\r\n for i, feature in enumerate(features):\r\n print(i)\r\n for j, name in enumerate(names):\r\n if feature.split(\"@\")[0] == name:\r\n id_feature = j + 1\r\n break\r\n blank.update({'product_feature_value': {\r\n 'id_feature': str(id_feature),\r\n 'value': {'language': {'attrs': {'id': '2'}, 'value': feature.split(\"@\")[1]}}}\r\n })\r\n prestashop.add('product_feature_values', blank)\r\n\r\n\r\n# Get features and feature values from products.csv\r\ndef get_features():\r\n features = []\r\n names = []\r\n with open('products.csv', encoding=\"utf8\") as csvfile:\r\n products = list(csv.reader(csvfile, delimiter=\";\"))\r\n for i in range(1, len(products)):\r\n features += products[i][6].split('|')\r\n\r\n features = list(dict.fromkeys(features))\r\n for i, atr in enumerate(features):\r\n names.append(atr.split('@')[0])\r\n names = list(dict.fromkeys(names))\r\n return features, names\r\n\r\n\r\n# Add feature tree to prestashop\r\ndef create_feature_tree(prestashop):\r\n features, names = get_features()\r\n add_features(names, prestashop)\r\n add_values(prestashop, features, names)\r\n\r\n\r\ndef main():\r\n prestashop = PrestaShopWebServiceDict('http://efilmy.best/api',\r\n 'AZ2A2PZC183CQIEHI8KR3SC48E8CTA7T', )\r\n while 1:\r\n print(\"1 Create Category Tree\")\r\n print(\"2 Add features\")\r\n print(\"3 Add products\")\r\n print(\"4 Exit\")\r\n x = input()\r\n if x == '1':\r\n create_category_tree(prestashop)\r\n elif x == '2':\r\n create_feature_tree(prestashop)\r\n elif x == '3':\r\n add_products(prestashop)\r\n elif x == '4':\r\n break\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "sub_path": "initialize_products.py", "file_name": "initialize_products.py", "file_ext": "py", "file_size_in_byte": 9803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "io.open", "line_number": 51, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 95, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 196, "usage_type": "call"}, {"api_name": "prestapyt.PrestaShopWebServiceDict", "line_number": 215, "usage_type": "call"}]} +{"seq_id": "568125047", "text": "from aiohttp.test_utils import AioHTTPTestCase\nfrom aiologger.loggers.json import JsonLogger\nfrom asynctest import patch, Mock, MagicMock\n\nfrom app.api import Api\n\n\nclass AppBaseTest(AioHTTPTestCase):\n async def get_application(self):\n api = Api()\n api.app[\"mongo_db\"] = MagicMock()\n return api.app\n\n def setUp(self):\n self.logger = Mock(spec=JsonLogger)\n self.logger_patch = patch(\n \"app.api.JsonLogger.with_default_handlers\", return_value=self.logger\n )\n self.logger_patch.start()\n super().setUp()\n\n def tearDown(self):\n self.logger_patch.stop()\n 
self.loop.run_until_complete(self.clear_mongo())\n super().tearDown()\n\n async def clear_mongo(self):\n collections = await self.app[\"mongo_db\"].list_collection_names()\n for collection in collections:\n await self.app[\"mongo_db\"][collection].drop()\n", "sub_path": "tests/base_tests.py", "file_name": "base_tests.py", "file_ext": "py", "file_size_in_byte": 919, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "aiohttp.test_utils.AioHTTPTestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "app.api.Api", "line_number": 10, "usage_type": "call"}, {"api_name": "asynctest.MagicMock", "line_number": 11, "usage_type": "call"}, {"api_name": "asynctest.Mock", "line_number": 15, "usage_type": "call"}, {"api_name": "aiologger.loggers.json.JsonLogger", "line_number": 15, "usage_type": "name"}, {"api_name": "asynctest.patch", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "27939288", "text": "import json\nimport re\n\nf = open('jawiki-country.json')\nflist = list(f)\n\nengland = \"\"\nfor item in flist:\n jsonData = json.loads(item)\n if jsonData[\"title\"] == \"イギリス\":\n england = jsonData[\"text\"]\n\nlines = england.split('\\n')\nflag = False\n\ndic = {}\nfor line in lines:\n if flag:\n if re.search('\\|(.+)\\=', line):\n m = re.search('\\|(.+)\\=', line)\n key = line[m.start():m.end()]\n key = key[1:-2]\n value = line[m.end():]\n dic[key] = value\n\n if re.search('\\{\\{基礎情報', line):\n flag = True\n \n if re.search('^\\}\\}$', line):\n flag = False\n\nprint(dic)", "sub_path": "chapter3/25.py", "file_name": "25.py", "file_ext": "py", "file_size_in_byte": 653, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "json.loads", "line_number": 9, "usage_type": "call"}, {"api_name": "re.search", "line_number": 19, "usage_type": "call"}, {"api_name": "re.search", "line_number": 20, "usage_type": "call"}, {"api_name": "re.search", "line_number": 26, "usage_type": "call"}, {"api_name": "re.search", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "317150509", "text": "from __future__ import print_function\n\nfrom __future__ import absolute_import\nimport ase.io\nimport ipywidgets as ipw\nfrom fileupload import FileUploadWidget\nimport tempfile\nimport nglview\nfrom six.moves import zip\n\ndef get_example_structure(key):\n from ase.io import read\n return read('miscellaneous/structures/' + key)\n\n\nclass StructureUploadWidget(ipw.VBox):\n\n DATA_FORMATS = ('StructureData', 'CifData')\n\n def __init__(self, text=\"Upload Structure\", **kwargs):\n \"\"\" Upload a structure and store it in AiiDA database.\n\n :param text: Text to display before upload button\n :type text: str\n \"\"\"\n\n self.file_upload = FileUploadWidget(text)\n structures = {\n \"Select structure\": False,\n }\n self.structure_select = ipw.Dropdown(\n options=[],\n description='Or choose from examples:',\n style={'description_width': '160px'},\n disabled=False)\n self.viewer = nglview.NGLWidget()\n self.btn_store = ipw.Button(\n description='Store in AiiDA', disabled=True)\n self.structure_description = ipw.Text(\n placeholder=\"Description (optional)\")\n\n self.structure_ase = None\n select = ipw.HBox([self.file_upload, self.structure_select])\n store = ipw.HBox([self.btn_store, self.structure_description])\n children = [select, self.viewer, store]\n\n super(StructureUploadWidget, self).__init__(\n children=children, **kwargs)\n\n 
self.file_upload.observe(self._on_file_upload, names='data')\n self.structure_select.observe(self._on_structure_select, names=['value'])\n self.btn_store.on_click(self._on_click_store)\n\n from aiida import load_dbenv, is_dbenv_loaded\n from aiida.backends import settings\n if not is_dbenv_loaded():\n load_dbenv(profile=settings.AIIDADB_PROFILE)\n\n # pylint: disable=unused-argument\n def _on_file_upload(self, change):\n self.tmp_folder = tempfile.mkdtemp()\n tmp = self.tmp_folder + '/' + self.file_upload.filename\n with open(tmp, 'w') as f:\n f.write(self.file_upload.data)\n structure_ase = self.get_ase(self.tmp_folder + '/' + self.file_upload.filename)\n self.select_structure(s=structure_ase, name=self.file_upload.filename)\n\n def _on_structure_select(self, change):\n global atoms\n indx = change['owner'].index\n atoms = change['new']\n if atoms is False:\n self.select_structure(s=None, name=None)\n return None\n formula = atoms.get_chemical_formula()\n self.select_structure(s=atoms, name=formula)\n\n\n def select_structure(self, s, name):\n self.btn_store.disabled = False\n if s is None:\n self.structure_ase = None\n self.btn_store.disabled = True\n self.structure_description.value = \"\"\n self.refresh_view()\n return\n\n self.structure_description.value = self.get_description(\n s, name)\n self.structure_ase = s\n self.refresh_view()\n\n def get_ase(self, fname):\n try:\n traj = ase.io.read(fname, index=\":\")\n except AttributeError:\n print(\"Looks like {} file does not contain structure coordinates\".\n format(fname))\n return None\n if len(traj) > 1:\n print(\n \"Warning: Uploaded file {} contained more than one structure. I take the first one.\"\n .format(fname))\n return traj[0]\n\n def get_description(self, structure_ase, name):\n formula = structure_ase.get_chemical_formula()\n return \"{} ({})\".format(formula, name)\n\n def refresh_view(self):\n viewer = self.viewer\n # Note: viewer.clear() only removes the 1st component\n # pylint: disable=protected-access\n for comp_id in viewer._ngl_component_ids:\n viewer.remove_component(comp_id)\n\n if self.structure_ase is None:\n return\n\n viewer.add_component(nglview.ASEStructure(\n self.structure_ase)) # adds ball+stick\n viewer.add_unitcell()\n\n # pylint: disable=unused-argument\n def _on_click_store(self, change):\n self.store_structure(\n self.file_upload.filename,\n description=self.structure_description.value)\n\n def store_structure(self, name, description=None):\n structure_ase = self.structure_ase\n if structure_ase is None:\n return\n\n from aiida.orm.data.structure import StructureData\n self.structure_node = StructureData(ase=structure_ase)\n if description is None:\n self.structure_node.description = self.get_description(\n structure_ase, name)\n else:\n self.structure_node.description = description\n self.structure_node.label = \".\".join(name.split('.')[:-1])\n self.structure_node.store()\n print(\"Stored in AiiDA: \" + repr(self.structure_node))\n\n#EOF\n", "sub_path": "common/structure/upload.py", "file_name": "upload.py", "file_ext": "py", "file_size_in_byte": 5061, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "ase.io.read", "line_number": 13, "usage_type": "call"}, {"api_name": "ipywidgets.VBox", "line_number": 16, "usage_type": "attribute"}, {"api_name": "fileupload.FileUploadWidget", "line_number": 27, "usage_type": "call"}, {"api_name": "ipywidgets.Dropdown", "line_number": 31, "usage_type": "call"}, {"api_name": 
"nglview.NGLWidget", "line_number": 36, "usage_type": "call"}, {"api_name": "ipywidgets.Button", "line_number": 37, "usage_type": "call"}, {"api_name": "ipywidgets.Text", "line_number": 39, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 43, "usage_type": "call"}, {"api_name": "ipywidgets.HBox", "line_number": 44, "usage_type": "call"}, {"api_name": "aiida.is_dbenv_loaded", "line_number": 56, "usage_type": "call"}, {"api_name": "aiida.load_dbenv", "line_number": 57, "usage_type": "call"}, {"api_name": "aiida.backends.settings.AIIDADB_PROFILE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "aiida.backends.settings", "line_number": 57, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 61, "usage_type": "call"}, {"api_name": "ase.io.io.read", "line_number": 95, "usage_type": "call"}, {"api_name": "ase.io.io", "line_number": 95, "usage_type": "attribute"}, {"api_name": "ase.io", "line_number": 95, "usage_type": "name"}, {"api_name": "nglview.ASEStructure", "line_number": 120, "usage_type": "call"}, {"api_name": "aiida.orm.data.structure.StructureData", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "89655003", "text": "from pyspark.sql import SparkSession\nfrom pyspark.ml.feature import StandardScaler\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.sql.functions import col\n\nimport pandas as pd\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom pyspark.ml.feature import VectorAssembler\nfrom pyspark.ml.clustering import KMeans\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pyspark.sql import SQLContext\n\n# ReadFile\nFEATURES_COL = ['Pace', 'Reb Rate', 'Pts', 'Opp Pts']\npath = 'data/3years.csv'\nspark = SparkSession.builder.appName('NBA-Analysis').getOrCreate()\ndata = spark.read.csv(path, header=True, inferSchema=True)\ndata.printSchema()\n\nvecAssembler = VectorAssembler(inputCols=FEATURES_COL, outputCol=\"features\")\ndf_kmeans = vecAssembler.transform(data).select('Team', 'features')\ndf_kmeans.show()\n\ncost = np.zeros(20)\nfor k in range(2, 20):\n kmeans = KMeans().setK(k).setSeed(1).setFeaturesCol(\"features\")\n model = kmeans.fit(df_kmeans.sample(False, 0.1, seed=42))\n cost[k] = model.computeCost(df_kmeans)\n\nplt.interactive(True)\nfig, ax = plt.subplots(1, 1, figsize=(8, 6))\nax.plot(range(2, 20), cost[2:20])\nax.set_xlabel('k')\nax.set_ylabel('cost')\nplt.ioff()\nfig.show()\nplt.savefig('K_Selection.png')\n\nk = 5\nkmeans = KMeans().setK(k).setSeed(1).setFeaturesCol(\"features\")\nmodel = kmeans.fit(df_kmeans)\ncenters = model.clusterCenters()\n\nprint(\"Cluster Centers: \")\nfor center in centers:\n print(center)\n\ntransformed = model.transform(df_kmeans).select('Team', 'prediction')\nrows = transformed.collect()\nprint(rows[:3])\n\ndf_pred = data.join(transformed, 'Team')\npddf_pred = df_pred.toPandas().set_index('Team')\n\nthreedee = plt.figure(figsize=(12, 10)).gca(projection='3d')\nthreedee.scatter(pddf_pred['Pts'], pddf_pred['Opp Pts'], pddf_pred['Reb Rate'], s=20,\n c=pddf_pred.prediction)\nthreedee.set_xlabel('Pts')\nthreedee.set_ylabel('Opp Pts')\nthreedee.set_zlabel('Reb Rate')\nplt.interactive(True)\nplt.ioff()\nplt.show()\nplt.savefig('KMeans.png')\n", "sub_path": "Clustering/ClusterExample.py", "file_name": "ClusterExample.py", "file_ext": "py", "file_size_in_byte": 1965, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 17, "usage_type": "call"}, 
{"api_name": "pyspark.sql.SparkSession.builder", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 17, "usage_type": "name"}, {"api_name": "pyspark.ml.feature.VectorAssembler", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "pyspark.ml.clustering.KMeans", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.interactive", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ioff", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "pyspark.ml.clustering.KMeans", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.interactive", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ioff", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "217240047", "text": "import git, glob, pytest, sqlparse, os\nfrom automig.lib import githelp, ref_diff, diffing\nfrom .test_diffing import ARGS\n\nSHAS = {\n 'create-t1': '2801578',\n 'add-t1-col': '2ff9297cb26c9491c159af728ad6734ad06f8542',\n 'add-t2-t1a': 'f8b1048fd12b6ef41568801867b67d3ca74904f3',\n 'unsup-alter-col': 'c479bb0',\n}\nGLOB ='test/schema/*.sql'\n\ndef test_get_paths():\n repo = git.Repo()\n tree = repo.commit(SHAS['create-t1']).tree\n assert githelp.get_paths(tree, os.path.join(repo.working_dir, GLOB)) == ['test/schema/sql.sql']\n\n@pytest.mark.skip\ndef test_create():\n diff = ref_diff.ref_range_diff(ARGS, git.Repo(), SHAS['create-t1'], SHAS['add-t1-col'], GLOB)\n raise NotImplementedError\n\ndef test_addcol():\n diff = ref_diff.ref_range_diff(ARGS, git.Repo(), SHAS['create-t1'], SHAS['add-t1-col'], GLOB)\n assert diff == {\n SHAS['add-t1-col']: {'t1': ['alter table t1 add column b int;']},\n }\n\ndef test_add_multi_commit():\n diff = ref_diff.ref_range_diff(ARGS, git.Repo(), SHAS['create-t1'], SHAS['add-t2-t1a'], GLOB)\n assert diff == {\n SHAS['add-t1-col']: {\n 't1': ['alter table t1 add column b int;'],\n },\n SHAS['add-t2-t1a']: {\n 't1': ['create index t1a on t1 (a);'],\n 't2': ['create table t2 (a int primary key);'],\n },\n }\n\ndef test_add_multi_commit_opaque():\n diff = ref_diff.ref_range_diff(ARGS, git.Repo(), SHAS['create-t1'], SHAS['add-t2-t1a'], GLOB, opaque=True)\n assert diff == {SHAS['add-t2-t1a']: {\n 't1': ['alter table t1 add column b int;', 'create index t1a on t1 (a);'],\n 't2': ['create table t2 (a int primary 
key);'],\n }}\n\nMOD_COLUMN = [\n 'create table t1 (a int primary key, b int);',\n 'create table t1 (a int primary key, b int unique);',\n]\n\ndef test_error_bubbling():\n sha_table_stmts = {'sha': diffing.diff(ARGS, *map(sqlparse.parse, MOD_COLUMN))}\n errors = ref_diff.extract_errors(sha_table_stmts)\n manual = {'sha': {'t1': ['hello']}}\n remaining = ref_diff.try_repair_errors(errors, manual, sha_table_stmts)\n assert not remaining\n assert sha_table_stmts['sha']['t1'] == ['hello']\n", "sub_path": "test/test_git.py", "file_name": "test_git.py", "file_ext": "py", "file_size_in_byte": 2051, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "git.Repo", "line_number": 14, "usage_type": "call"}, {"api_name": "automig.lib.githelp.get_paths", "line_number": 16, "usage_type": "call"}, {"api_name": "automig.lib.githelp", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "automig.lib.ref_diff.ref_range_diff", "line_number": 20, "usage_type": "call"}, {"api_name": "test_diffing.ARGS", "line_number": 20, "usage_type": "argument"}, {"api_name": "automig.lib.ref_diff", "line_number": 20, "usage_type": "name"}, {"api_name": "git.Repo", "line_number": 20, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 18, "usage_type": "attribute"}, {"api_name": "automig.lib.ref_diff.ref_range_diff", "line_number": 24, "usage_type": "call"}, {"api_name": "test_diffing.ARGS", "line_number": 24, "usage_type": "argument"}, {"api_name": "automig.lib.ref_diff", "line_number": 24, "usage_type": "name"}, {"api_name": "git.Repo", "line_number": 24, "usage_type": "call"}, {"api_name": "automig.lib.ref_diff.ref_range_diff", "line_number": 30, "usage_type": "call"}, {"api_name": "test_diffing.ARGS", "line_number": 30, "usage_type": "argument"}, {"api_name": "automig.lib.ref_diff", "line_number": 30, "usage_type": "name"}, {"api_name": "git.Repo", "line_number": 30, "usage_type": "call"}, {"api_name": "automig.lib.ref_diff.ref_range_diff", "line_number": 42, "usage_type": "call"}, {"api_name": "test_diffing.ARGS", "line_number": 42, "usage_type": "argument"}, {"api_name": "automig.lib.ref_diff", "line_number": 42, "usage_type": "name"}, {"api_name": "git.Repo", "line_number": 42, "usage_type": "call"}, {"api_name": "automig.lib.diffing.diff", "line_number": 54, "usage_type": "call"}, {"api_name": "test_diffing.ARGS", "line_number": 54, "usage_type": "argument"}, {"api_name": "automig.lib.diffing", "line_number": 54, "usage_type": "name"}, {"api_name": "sqlparse.parse", "line_number": 54, "usage_type": "attribute"}, {"api_name": "automig.lib.ref_diff.extract_errors", "line_number": 55, "usage_type": "call"}, {"api_name": "automig.lib.ref_diff", "line_number": 55, "usage_type": "name"}, {"api_name": "automig.lib.ref_diff.try_repair_errors", "line_number": 57, "usage_type": "call"}, {"api_name": "automig.lib.ref_diff", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "497696405", "text": "#!/usr/bin/env python\n\nimport logging\nimport tornado\nimport tornado.web\nfrom tornado import httpserver\nfrom tornado import ioloop\nfrom tornado import websocket\n\nimport os\nimport sys\nimport json\nimport webbrowser\nimport nbformat\nfrom queue import Queue\n\nfrom execute import ThreadedExecutor\nfrom cells import Notebook\n\n\nSTATIC_PATH = os.path.join(os.path.split(__file__)[0], '..', 
'client')\n\n\ndef serialize_binary_message(msg):\n \"\"\"serialize a message as a binary blob\n Header:\n 4 bytes: number of msg parts (nbufs) as 32b int\n 4 * nbufs bytes: offset for each buffer as integer as 32b int\n Offsets are from the start of the buffer, including the header.\n Returns\n -------\n The message serialized to bytes.\n \"\"\"\n from jupyter_client.jsonutil import date_default\n import struct\n # don't modify msg or buffer list in-place\n msg = msg.copy()\n buffers = list(msg.pop('buffers'))\n if sys.version_info < (3, 4):\n buffers = [x.tobytes() for x in buffers]\n bmsg = json.dumps(msg, default=date_default).encode('utf8')\n buffers.insert(0, bmsg)\n nbufs = len(buffers)\n offsets = [4 * (nbufs + 1)]\n for buf in buffers[:-1]:\n offsets.append(offsets[-1] + len(buf))\n offsets_buf = struct.pack('!' + 'I' * (nbufs + 1), nbufs, *offsets)\n buffers.insert(0, offsets_buf)\n return b''.join(buffers)\n\n\nclass PeriodicOutputCallback(object):\n \"\"\"\n Sets up a periodic callback to push output to cells by polling from\n the queue pushed to by the ThreadedExecutor.\n \"\"\"\n\n def __init__(self, server, notebook, period=20):\n self.server = server\n self.notebook = notebook\n self.period = period\n\n\n def start(self):\n self.callback = ioloop.PeriodicCallback(self.__call__, self.period)\n self.callback.start()\n\n def stop(self):\n self.callback.stop()\n\n def __call__(self):\n \"Processes queue pushed to by ThreadedExecutor\"\n try:\n val = self.server.queue.get_nowait()\n self.server.queue.task_done()\n result, status = val\n except:\n return\n\n connection = (self.server.BROWSER_CONNECTIONS[0]\n if self.server.BROWSER_CONNECTIONS else None)\n\n if connection and (status == 'comm_open'):\n print(\"REQUEST TO OPEN COMM FOR JS: %s\" % result) # TODO: buffers\n self.notebook.message(connection, 'comm_open', result['content'])\n # e.g:\n # {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc',\n # 'target_name': 'ZOO', 'target_module': None}\n return\n elif connection and (status == 'comm_msg'):\n buffers = result['buffers']\n if buffers == []:\n self.notebook.message(connection, 'comm_msg', # FIXME: redundant 'comm_msg'\n {'msg_type': 'comm_msg',\n 'content': result['content']})\n else:\n msg = {'msg_type': 'comm_msg',\n 'content': result['content']}\n connection.write_message(serialize_binary_message(# FIXME: use message method\n {'cmd':'comm_msg', 'args':msg, 'buffers': buffers}), binary=True)\n return\n\n\n else:\n outnode, execution_count = result, status\n\n if connection:\n cell = self.notebook.find_cell(execution_count)\n if cell is None: return # There may be no cell if running a silent execution\n position = self.notebook.cell_position(cell)\n\n if execution_count is None:\n return # silent execution before *any* output\n if outnode is None and (cell.prompt == execution_count):\n return # no need to update prompt for silent execution\n self.notebook.update_cell_outputs(connection, position, outnode)\n\n\nclass LabServer(websocket.WebSocketHandler):\n\n BROWSER_CONNECTIONS = []\n\n NOTEBOOK = None\n\n def open(self):\n self.queue = Queue()\n\n # Note that there are multiple LabServer instances and we want only one notebook!\n # (for now)\n if LabServer.NOTEBOOK is None:\n LabServer.NOTEBOOK = Notebook(ThreadedExecutor(\"threaded-kernel\", self.queue))\n LabServer.NOTEBOOK.STATIC_PATH = STATIC_PATH\n\n self.output_callback = PeriodicOutputCallback(self, LabServer.NOTEBOOK)\n self.output_callback.start()\n logging.info(\"Connection opened\")\n\n\n def 
on_message(self, message):\n \"Websocket on_message handler. Tracks connection type.\"\n logging.info(u\"Received message: {0}\".format(message))\n try:\n payload = json.loads(message)\n except Exception as e:\n logging.info('JSON parse exception: %s' % str(e))\n return\n\n if payload.get('init',False):\n if payload['init'] == 'browser':\n self.BROWSER_CONNECTIONS.append(self)\n logging.info('Added browser client connection')\n if len(LabServer.NOTEBOOK.cells) > 0:\n logging.info(\"Restart with previously opened notebook\")\n LabServer.NOTEBOOK.reload(self)\n # If you hit reload in the browser, the CSS needs to be re-sent\n LabServer.NOTEBOOK.update_style(self, css=None)\n return\n\n # SOME COMMANDS (e.g mirroring) should happen even without a browser tab open!\n connection = self.BROWSER_CONNECTIONS[0] if len(self.BROWSER_CONNECTIONS) else None\n LabServer.NOTEBOOK.dispatch(connection, payload)\n\n\n def check_origin(self, origin):\n return True\n\n def on_close(self):\n logging.info(\"ON_CLOSE\")\n if self in self.BROWSER_CONNECTIONS:\n self.BROWSER_CONNECTIONS.remove(self)\n\n self.output_callback.stop()\n\nif __name__ == \"__main__\":\n import tornado.options\n tornado.options.parse_command_line()\n\n\n html_handler = (r'/(.*)', tornado.web.StaticFileHandler,\n {'path': STATIC_PATH})\n\n\n tornado.web.Application([html_handler]).listen(8000)\n ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\", LabServer)]))\n ws_server.listen(9999, \"127.0.0.1\")\n logging.info(\"STARTED: Server start listening\")\n ioloop.IOLoop.instance().start()\n", "sub_path": "server/labmode.py", "file_name": "labmode.py", "file_ext": "py", "file_size_in_byte": 6360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 39, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 41, "usage_type": "call"}, {"api_name": "jupyter_client.jsonutil.date_default", "line_number": 41, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 47, "usage_type": "call"}, {"api_name": "tornado.ioloop.PeriodicCallback", "line_number": 65, "usage_type": "call"}, {"api_name": "tornado.ioloop", "line_number": 65, "usage_type": "name"}, {"api_name": "tornado.websocket.WebSocketHandler", "line_number": 119, "usage_type": "attribute"}, {"api_name": "tornado.websocket", "line_number": 119, "usage_type": "name"}, {"api_name": "queue.Queue", "line_number": 126, "usage_type": "call"}, {"api_name": "cells.Notebook", "line_number": 131, "usage_type": "call"}, {"api_name": "execute.ThreadedExecutor", "line_number": 131, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 136, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 141, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 143, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 145, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 151, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 153, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 168, "usage_type": "call"}, {"api_name": "tornado.options.parse_command_line", "line_number": 176, "usage_type": "call"}, {"api_name": "tornado.options", "line_number": 176, 
"usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 179, "usage_type": "attribute"}, {"api_name": "tornado.web.Application", "line_number": 183, "usage_type": "call"}, {"api_name": "tornado.web", "line_number": 183, "usage_type": "attribute"}, {"api_name": "tornado.httpserver.HTTPServer", "line_number": 184, "usage_type": "call"}, {"api_name": "tornado.httpserver", "line_number": 184, "usage_type": "name"}, {"api_name": "tornado.web.Application", "line_number": 184, "usage_type": "call"}, {"api_name": "tornado.web", "line_number": 184, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 186, "usage_type": "call"}, {"api_name": "tornado.ioloop.IOLoop.instance", "line_number": 187, "usage_type": "call"}, {"api_name": "tornado.ioloop.IOLoop", "line_number": 187, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 187, "usage_type": "name"}]} +{"seq_id": "119101883", "text": "from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\n\nfrom forms import CommentForm\nfrom itertools import ifilter\nfrom models import *\nimport bitcoinrpc\nconn = bitcoinrpc.connect_to_local() #need to have a configuration file specified and need site_packages/bitcoinrpc/read_default_config() to look at config file\n#how do i connect to a remote wallet (to pay users)\n'''\nadd posts\nways to vote\nways to sort posts by 'upvotes'\ncreate a user\nchange/update password\n\n'''\n\ndef logout_view(request):\n logout(request)\n return redirect('myapp.views.index')\n \n\ndef profile(request):\n if request.user.is_authenticated():\n logged_in=True\n account_string=str(request.user)\n '''\n #shows balance on my local wallet\n bal_msg= \"Your balance is %f\" % (conn.getbalance(),)\n #validating an address\n btc_add=\"17Aze6KsuZasYk2XtRBX1ZtpVzhFZs9nU6\" #some coinbase add\n rv = conn.validateaddress(btc_add)\n if rv.isvalid:\n val_msg= \"The address that you provided is valid\"\n else:\n val_msg= \"The address that you provided is invalid, please correct\"\n #send bitcoin from your address\n if request.method == \"POST\":\n conn.sendtoaddress(btc_add, 0.0002)\n '''\n #https://en.bitcoin.it/wiki/Accounts_explained\n #1) Get account Address for the user. 
User clicks on the button, gets an address (on your wallet) that he can send BTC to and use to buy stuff with\n        if request.method == \"POST\":\n            new_add=conn.getnewaddress(account_string) #getaccountaddress gets same address each time, getnewaddress new address each time\n            account_balance=conn.getbalance(account_string)\n            account_addresses=conn.getaddressesbyaccount(account_string)\n    else:\n        logged_in=False\n    return render(request, 'myapp/profile.html', locals())\n\ndef index(request):\n    top_post = Post.objects.order_by('-post_date')[0]\n    second_row = Post.objects.order_by('-post_date')[1:4]\n    third_row = Post.objects.order_by('-post_date')[4:7]\n    fourth_row = Post.objects.order_by('-post_date')[7:10]\n    if request.user.is_authenticated():\n        logged_in=True\n    else:\n        logged_in=False\n    return render(request, 'myapp/index.html', locals())\n\ndef article(request,art_id,location):\n    post=Post.objects.get(pk=art_id)\n    if request.user.is_authenticated():\n        logged_in=True\n        form = CommentForm(request.POST or None)\n        if request.method == \"POST\":\n            if form.is_valid():\n                temp = form.save(commit=False)\n                parent = form['parent'].value()\n                temp.user=request.user\n                temp.post=post\n                if parent == '':\n                    #Set a blank path then save it to get an ID\n                    temp.path = []\n                    temp.save()\n                    temp.path = [temp.id]\n                    location=temp.id\n                else:\n                    #Get the parent node\n                    node = Comment.objects.get(id=parent)\n                    location=node.id\n                    temp.depth = node.depth + 1\n                    '''\n                    temp.path = node.path\n                    #Store parents path then apply comment ID\n                    temp.save()\n                    temp.path.append(temp.id)\n                    '''\n                    s = str(node.path) #modded for mysql\n                    temp.path = eval(s)\n                    #store parents path then apply comment ID\n                    temp.save()\n                    id= int(temp.id)\n                    temp.path.append(id)\n                #Final save for parents and children\n                temp.save()\n                return redirect('myapp.views.article', art_id,location)\n        #Retrieve all comments and sort them by path\n        comment_tree = Comment.objects.filter(post=post).order_by('-path')\n    else:\n        logged_in=False\n        comment_tree = Comment.objects.filter(post=post).order_by('-path')\n    return render(request, 'myapp/article.html', locals())\n", "sub_path": "myapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4356, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "bitcoinrpc.connect_to_local", "line_number": 8, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 21, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 61, "usage_type": "call"}, {"api_name": "forms.CommentForm", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 99, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 105, "usage_type": "call"}]}
{"seq_id": "476947368", "text": "\"\"\"\nTokenCheck API\n\"\"\"\n\nfrom werkzeug.exceptions import BadRequest\nfrom flask import Response\nfrom flask_restx import Resource\nfrom flask_login import current_user\n\nfrom app import API\nfrom app.services import (\n    FormService,\n    TokenService,\n    GroupService,\n    FormResultService\n)\n\n\nTOKEN_CHECK_NS = API.namespace('tokens/', description='TokenCheck APIs')\n\n\n@TOKEN_CHECK_NS.route(\"/check_token\")\nclass TokenCheckAPI(Resource):\n    \"\"\"\n    TokenCheck API\n\n    url: '/tokens/{token}/check_token'\n    methods: get\n    
\"\"\"\n\n @API.doc(\n responses={\n 204: 'No Content',\n 400: 'Invalid data'\n },\n params={\n 'token': 'token to check'\n }\n )\n #pylint: disable=no-self-use\n def get(self, token):\n \"\"\"\n Check whether token is valid\n\n :param token: token to check\n \"\"\"\n token_instance = TokenService.get_by_token(token)\n if token_instance is None:\n raise BadRequest('Wrong token')\n\n token_data = TokenService.decode_token_for_check(token)\n if token_data is None:\n raise BadRequest('Wrong token') # Not enough token segments\n\n is_correct, _ = TokenService.validate_data(token_data)\n if not is_correct:\n raise BadRequest('Wrong token') # Token isn't valid\n\n form_id = token_data.get('form_id')\n form = FormService.get_by_id(form_id)\n if form is None:\n raise BadRequest('Wrong token') # Form doesn't exist\n\n group_id = token_data.get('group_id')\n if group_id is not None:\n group = GroupService.get_by_id(group_id)\n if group is None:\n raise BadRequest('Wrong token') # Group doesn't exist\n\n return Response(status=204)\n\n\n@TOKEN_CHECK_NS.route(\"/check_user\")\nclass TokenUserCheckAPI(Resource):\n \"\"\"\n TokenUserCheck API\n\n url: '/tokens/{token}/check_user'\n methods: get\n \"\"\"\n\n @API.doc(\n responses={\n 204: 'No Content',\n 400: 'Invalid data'\n },\n params={\n 'token': 'token to form'\n }\n )\n #pylint: disable=no-self-use\n def get(self, token):\n \"\"\"\n Check whether user can answer to form by using this token\n :param token: token to check\n \"\"\"\n token_instance = TokenService.get_by_token(token)\n if token_instance is None:\n raise BadRequest(\"Token doesn't exist\")\n\n if current_user.is_authenticated:\n user_can_answer = FormResultService.check_whether_user_passed_form(\n user_id=current_user.id,\n token_id=token_instance.id,\n )\n if not user_can_answer:\n raise BadRequest(\"You have already passed this form using this token\")\n\n return Response(status=204)\n", "sub_path": "src/app/routers/token_check.py", "file_name": "token_check.py", "file_ext": "py", "file_size_in_byte": 2851, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "app.API.namespace", "line_number": 19, "usage_type": "call"}, {"api_name": "app.API", "line_number": 19, "usage_type": "name"}, {"api_name": "flask_restx.Resource", "line_number": 23, "usage_type": "name"}, {"api_name": "app.services.TokenService.get_by_token", "line_number": 47, "usage_type": "call"}, {"api_name": "app.services.TokenService", "line_number": 47, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 49, "usage_type": "call"}, {"api_name": "app.services.TokenService.decode_token_for_check", "line_number": 51, "usage_type": "call"}, {"api_name": "app.services.TokenService", "line_number": 51, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 53, "usage_type": "call"}, {"api_name": "app.services.TokenService.validate_data", "line_number": 55, "usage_type": "call"}, {"api_name": "app.services.TokenService", "line_number": 55, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 57, "usage_type": "call"}, {"api_name": "app.services.FormService.get_by_id", "line_number": 60, "usage_type": "call"}, {"api_name": "app.services.FormService", "line_number": 60, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 62, "usage_type": "call"}, {"api_name": "app.services.GroupService.get_by_id", "line_number": 66, "usage_type": 
"call"}, {"api_name": "app.services.GroupService", "line_number": 66, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 70, "usage_type": "call"}, {"api_name": "app.API.doc", "line_number": 31, "usage_type": "call"}, {"api_name": "app.API", "line_number": 31, "usage_type": "name"}, {"api_name": "flask_restx.Resource", "line_number": 74, "usage_type": "name"}, {"api_name": "app.services.TokenService.get_by_token", "line_number": 97, "usage_type": "call"}, {"api_name": "app.services.TokenService", "line_number": 97, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 99, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 101, "usage_type": "name"}, {"api_name": "app.services.FormResultService.check_whether_user_passed_form", "line_number": 102, "usage_type": "call"}, {"api_name": "app.services.FormResultService", "line_number": 102, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 103, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 109, "usage_type": "call"}, {"api_name": "app.API.doc", "line_number": 82, "usage_type": "call"}, {"api_name": "app.API", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "228632363", "text": "import os\r\nimport sys\r\nimport mne\r\nimport numpy as np\r\n\r\n\r\ndef get_labels(subjects_dir, labels_dir, parc='aparc_sub'):\r\n labels = mne.read_labels_from_annot('fsaverage', parc=parc, subjects_dir=subjects_dir)\r\n label_files = sorted([f.path for f in os.scandir(labels_dir)])\r\n if len(label_files) < len(labels):\r\n for i, label in enumerate(labels):\r\n labels[i] = label.morph(subject_to='fsaverage', grade=4, subjects_dir=subjects_dir)\r\n labels[i].save(os.path.join(labels_dir, labels[i].name))\r\n else:\r\n labels = []\r\n for file in label_files:\r\n labels.append(mne.read_label(file, 'fsaverage'))\r\n return labels\r\n\r\n\r\ndef parcellate_stc(stc, labels, agg='mean'):\r\n parc_data = np.zeros((len(labels), stc.shape[-1]))\r\n\r\n for i, label in enumerate(labels):\r\n if label.name.startswith('unknown'):\r\n continue\r\n stc_in_label = stc.in_label(label)\r\n if agg == 'mean':\r\n parc_data[i] = np.mean(stc_in_label.data, axis=0)\r\n elif agg == 'max':\r\n parc_data[i] = np.max(stc_in_label.data, axis=0)\r\n else:\r\n raise RuntimeError('\"agg\" argument must be one of (\"mean\", \"max\")')\r\n\r\n return parc_data\r\n\r\n\r\ndef main(subj, task, data_dir, subjects_dir, ext='fsaverage', agg='mean', cohorts=None, window=False):\r\n psd_dir = os.path.join(data_dir, subj, 'psd')\r\n zmap_dir = os.path.join(data_dir, subj, 'zmap')\r\n output_dir = os.path.join(data_dir, subj, 'parc')\r\n os.makedirs(output_dir, exist_ok=True)\r\n\r\n if ext == 'zmap':\r\n input_dir = zmap_dir\r\n else:\r\n input_dir = psd_dir\r\n\r\n labels_dir = '/scratch/nbe/tbi-meg/veera/labels_aparc_sub'\r\n os.makedirs(labels_dir, exist_ok=True)\r\n labels = get_labels(subjects_dir, labels_dir)\r\n\r\n if window:\r\n for i in range(40, 390, 50):\r\n if cohorts is not None:\r\n stc_fname = os.path.join(input_dir, f'{subj}-{task}-{i}-{cohorts}-psd-{ext}')\r\n else:\r\n stc_fname = 
os.path.join(input_dir, f'{subj}-{task}-{i}-psd-{ext}')\r\n stc = mne.read_source_estimate(stc_fname, 'fsaverage')\r\n\r\n parc_stc_data = parcellate_stc(stc, labels, agg)\r\n\r\n outfile = os.path.join(output_dir, os.path.basename(stc_fname) + f'-{agg}-aparc-data.csv')\r\n print('Saving data to', outfile)\r\n np.savetxt(outfile, parc_stc_data, fmt='%.7f', delimiter=\",\")\r\n else:\r\n if cohorts is not None:\r\n stc_fname = os.path.join(input_dir, f'{subj}-{task}-{cohorts}-psd-{ext}')\r\n else:\r\n stc_fname = os.path.join(input_dir, f'{subj}-{task}-psd-{ext}')\r\n stc = mne.read_source_estimate(stc_fname, 'fsaverage')\r\n\r\n parc_stc_data = parcellate_stc(stc, labels, agg)\r\n\r\n outfile = os.path.join(output_dir, os.path.basename(stc_fname) + f'-{agg}-aparc-data.csv')\r\n print('Saving data to', outfile)\r\n np.savetxt(outfile, parc_stc_data, fmt='%.7f', delimiter=\",\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n subject = sys.argv[1]\r\n task = sys.argv[2]\r\n data_dir = sys.argv[3]\r\n subjects_dir = sys.argv[4]\r\n if len(sys.argv) > 5:\r\n cohorts = sys.argv[5]\r\n else:\r\n cohorts = None\r\n\r\n main(subject, task, data_dir, subjects_dir, ext='zmap', agg='mean', cohorts=cohorts, window=True)\r\n", "sub_path": "pipeline-tbi/parc/parcellation.py", "file_name": "parcellation.py", "file_ext": "py", "file_size_in_byte": 3359, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "mne.read_labels_from_annot", "line_number": 8, "usage_type": "call"}, {"api_name": "os.scandir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "mne.read_label", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 42, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "mne.read_source_estimate", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, 
{"api_name": "mne.read_source_estimate", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 77, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 82, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 83, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 84, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 85, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 86, "usage_type": "attribute"}]} +{"seq_id": "587220526", "text": "\"\"\"\nAdd peaks or remove peaks from the k-core histogram of a base graph and save the results\n\"\"\"\nfrom __future__ import division, print_function\nimport networkx as nx\nimport numpy as np\nfrom decomposition import kcore\nfrom experiments import kcore as kexp\nfrom experiments import histogram\nfrom noise import missing\nimport csv\n\nclass KCoreGraph(object):\n \"\"\"docstring for KCoreGraph.\"\"\"\n def __init__(self, base, sname, adjacency=False):\n super(KCoreGraph, self).__init__()\n self.sname = sname\n if adjacency:\n self.graph = nx.read_adjlist(base)\n else:\n self.graph = nx.read_edgelist(base)\n self.cnumber = kcore.KCore(self.graph).coreNumber()\n self.top = [1, 0.2, 0.1]\n self.ori_graph = self.graph.copy()\n\n def resetGraph(self):\n self.graph = self.ori_graph.copy()\n\n def removeNodes(self, nodes, count, save=False):\n remove = np.random.choice(nodes, size=count, replace=False)\n nodes = [n for n in nodes if n not in remove]\n self.graph.remove_nodes_from(remove)\n if save:\n nx.write_edgelist(self.graph, self.sname, delimiter='\\t', data=False)\n return self.graph, nodes\n\n def addPeak(self, k, nodes, edges, count, lst, ext_nodes):\n nodes += ['n_'+str(i) for i in xrange(lst,lst+count)]\n ni_count = np.random.randint(k,high=k+10,size=len(nodes))\n no_count = np.random.randint(0,high=10,size=len(nodes))\n for i, n in enumerate(nodes):\n candidates = list(nodes).remove(n)\n ni = np.random.choice(candidates, size=ni_count[i], replace=False)\n no = np.random.choice(ext_nodes, size=no_count[i], replace=False)\n edges += [(n, u) for u in ni]\n edges += [(n, u) for u in no]\n\n graph = self.graph.copy()\n graph.add_edge_from(edges)\n return graph, nodes, edges, lst + count\n\n def saveData(self, data, t='removed'):\n with open(self.sname, 'w') as f:\n writer = csv.writer(f,delimiter=',')\n header = [t+'_core', 'frac'] + ['error_'+str(i) for i in self.top] + \\\n ['std_'+str(i*10) for i in self.top] +\\\n ['core', 'count', 'degree', 'clustering', 'components']\n writer.writerow(header)\n for d in data:\n writer.writerow(d)\n\n def runExpRemoveNodes(self, k):\n \"\"\"\n Remove fractions of top k nodes\n \"\"\"\n step = 1\n fracs = [i for i in xrange(0, 20, step)]\n data = []\n for _c in xrange(0,5):\n self.resetGraph()\n nodes = [n for n in self.cnumber if self.cnumber[n] == k]\n count = int(len(nodes)*step/100)\n for f in fracs:\n print(_c, f, len(nodes), count)\n self.graph, nodes = self.removeNodes(nodes, count)\n\n kcore_exp = kexp.KCoreExperiment(self.graph, None, ftype='object', top=self.top)\n _, _, _, error = kcore_exp.expRandomMissingEdges(5, 10, 50)\n\n exp = histogram.KCoreHistogram(self.graph, None, ftype='object')\n cdata = 
exp.runExperiment()\n data += [[k,f] + [e[0] for e in error] + [e[1] for e in error] + d for d in cdata]\n\n self.saveData(data)\n\n def runExpAddPeak(self, k):\n \"\"\"\n Add a peak at given core number\n \"\"\"\n step = 1\n fracs = [i for i in xrange(0, 10, step)]\n data = []\n ext_nodes = [n for n in self.cnumber if self.cnumber[n] == k]\n count = int(len(ext_nodes)*step/100)\n for _c in xrange(0,5):\n lst = 0\n edges = []\n in_nodes = []\n for f in fracs:\n print(_c, f, len(nodes), count)\n graph, in_nodes, edges, lst = self.addPeak(k, in_nodes, edges, count, lst, ext_nodes)\n\n kcore_exp = kexp.KCoreExperiment(graph, None, ftype='object', top=self.top)\n _, _, _, error = kcore_exp.expRandomMissingEdges(5, 10, 50)\n\n exp = histogram.KCoreHistogram(graph, None, ftype='object')\n cdata = exp.runExperiment()\n data += [[k,f] + [e[0] for e in error] + [e[1] for e in error] + d for d in cdata]\n\n self.saveData(data, t='added')\n", "sub_path": "src/data/kcore_graph.py", "file_name": "kcore_graph.py", "file_ext": "py", "file_size_in_byte": 4276, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "networkx.read_adjlist", "line_number": 19, "usage_type": "call"}, {"api_name": "networkx.read_edgelist", "line_number": 21, "usage_type": "call"}, {"api_name": "decomposition.kcore.KCore", "line_number": 22, "usage_type": "call"}, {"api_name": "decomposition.kcore", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "networkx.write_edgelist", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 44, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 54, "usage_type": "call"}, {"api_name": "experiments.kcore.KCoreExperiment", "line_number": 77, "usage_type": "call"}, {"api_name": "experiments.kcore", "line_number": 77, "usage_type": "name"}, {"api_name": "experiments.histogram.KCoreHistogram", "line_number": 80, "usage_type": "call"}, {"api_name": "experiments.histogram", "line_number": 80, "usage_type": "name"}, {"api_name": "experiments.kcore.KCoreExperiment", "line_number": 103, "usage_type": "call"}, {"api_name": "experiments.kcore", "line_number": 103, "usage_type": "name"}, {"api_name": "experiments.histogram.KCoreHistogram", "line_number": 106, "usage_type": "call"}, {"api_name": "experiments.histogram", "line_number": 106, "usage_type": "name"}]} +{"seq_id": "392312654", "text": "import time\nimport torch\nimport torch.nn.functional as F\n\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader\nfrom util.config import DATASET_PARAMETERS, NETWORKS_PARAMETERS\nfrom util.parse_dataset import csv_to_list\nfrom network import restore_train, get_network\nfrom utils import Meter, cycle, save_model, get_collate_fn, Logger\nfrom dataset import VoiceDataset, 
FaceDataset, Voice_Face_Dataset\n\n# dataset and dataloader\nprint('Parsing your dataset...')\nvoice_list, face_list, id_class_num, emotion_class_num = csv_to_list(DATASET_PARAMETERS)\nprint('voice samples num = %d, face samples num = %d' %(len(voice_list),len(face_list)))\nprint('Preparing the datasets...')\n# voice_face_dataset = Voice_Face_Dataset(voice_list,face_list,DATASET_PARAMETERS['nframe_range'])\nvoice_dataset = VoiceDataset(voice_list,DATASET_PARAMETERS['nframe_range'])\nface_dataset = FaceDataset(face_list)\n\nprint('Preparing the dataloaders...')\ncollate_fn = get_collate_fn(DATASET_PARAMETERS['nframe_range'])\n\nvoice_loader = DataLoader(voice_dataset, shuffle=True, drop_last=True,\n                          batch_size=DATASET_PARAMETERS['batch_size'],\n                          num_workers=DATASET_PARAMETERS['workers_num'], # number of processes for multi-process data loading\n                          collate_fn=collate_fn\n                          ) # how to collate multiple samples into one batch\nface_loader = DataLoader(face_dataset, shuffle=True, drop_last=True,\n                         batch_size=DATASET_PARAMETERS['batch_size'],\n                         num_workers=DATASET_PARAMETERS['workers_num'])\n\nvoice_iterator = iter(cycle(voice_loader))\nface_iterator = iter(cycle(face_loader))\n\nprint('Initializing networks...')\nNETWORKS_PARAMETERS['e']['output_channel'] = id_class_num\ne_net, e_optimizer = get_network('e', NETWORKS_PARAMETERS, test=True) # partial training\nNETWORKS_PARAMETERS['g']['input_channel'][1] = emotion_class_num\ng_net, g_optimizer = get_network('g', NETWORKS_PARAMETERS, train=True)\n# NETWORKS_PARAMETERS['d1-condition']['input_channel'][1] = emotion_class_num\nd1_net, d1_optimizer = get_network('d0', NETWORKS_PARAMETERS, train=True)\nd2_net, d2_optimizer = get_network('d0', NETWORKS_PARAMETERS, train=True)\nf1_net, f1_optimizer = get_network('f', NETWORKS_PARAMETERS, train=True)\nf2_net, f2_optimizer = get_network('f', NETWORKS_PARAMETERS, train=True)\n\nNETWORKS_PARAMETERS['c']['output_channel'] = id_class_num\nc1_net, c1_optimizer = get_network('c', NETWORKS_PARAMETERS, train=True)\nNETWORKS_PARAMETERS['c']['output_channel'] = emotion_class_num\nc2_net, c2_optimizer = get_network('c', NETWORKS_PARAMETERS, train=True)\n\n\n\n# resume training: load the existing models\nif NETWORKS_PARAMETERS['finetune']:\n    restore_train(g_net, d1_net, f1_net, f2_net)\n\n# label for real/fake faces\nreal_label = torch.full((DATASET_PARAMETERS['batch_size'], 1), 1)\nfake_label = torch.full((DATASET_PARAMETERS['batch_size'], 1), 0)\nD_loss_positive = torch.tensor(1, dtype=torch.float)\nD_loss_negative = D_loss_positive * -1\n\n# Meters for recording the training status (logging module) #\nwriter = SummaryWriter(\"./models/log\")\nlogger = Logger(DATASET_PARAMETERS['log_dir'], time.strftime(\"%Y-%m-%d,%H,%M\"))\niteration = Meter('Iter', 'sum', ':5d')\ndata_time = Meter('Data', 'sum', ':4.2f')\nbatch_time = Meter('Time', 'sum', ':4.2f')\nD_real = Meter('D_real', 'avg', ':4.3f')\nD_fake = Meter('D_fake', 'avg', ':4.3f')\nC1_real = Meter('C1_real', 'avg', ':4.3f')\nC2_real= Meter('C2_real', 'avg', ':4.3f')\nC1_fake = Meter('C1_fake', 'avg', ':4.3f')\nC2_fake= Meter('C2_fake', 'avg', ':4.3f')\nGD_fake = Meter('G_D_fake', 'avg', ':4.3f')\n\ncriterionL1 = torch.nn.L1Loss()\nprint('Training models...')\nfor it in range(600000+1):\n    # data\n    start_time = time.time()\n    # voice, face, voice_identity_label, voice_emotion_label = next(voice_face_iterator)\n    # face_identity_label, face_emotion_label = voice_identity_label, voice_emotion_label\n    voice, voice_identity_label, voice_emotion_label = next(voice_iterator)\n    face, face_identity_label, face_emotion_label = next(face_iterator)\n\n    noise = 
0.05*torch.randn(DATASET_PARAMETERS['batch_size'], 64, 1, 1) # standard normal distribution\n\n    # use GPU or not\n    if NETWORKS_PARAMETERS['GPU']:\n        voice, voice_identity_label, voice_emotion_label = voice.cuda(), voice_identity_label.cuda(), voice_emotion_label.cuda()\n        face, face_identity_label, face_emotion_label = face.cuda(), face_identity_label.cuda(), face_emotion_label.cuda()\n        real_label, fake_label = real_label.cuda(), fake_label.cuda()\n        noise = noise.cuda()\n        D_loss_positive, D_loss_negative = D_loss_positive.cuda(), D_loss_negative.cuda()\n\n\n    # get embeddings and generated faces\n    embeddings = e_net(voice)\n    embeddings = F.normalize(embeddings)\n    # introduce some permutations\n    embeddings = embeddings + noise\n    embeddings = F.normalize(embeddings)\n    embeddings = embeddings.squeeze() # squeeze dims from 64,128,1,1 --> 64,128\n\n    # expand dims from 64,1 --> 64, 8, 128, 128 , nn.Embedding(emotion_class_num,emotion_class_num)\n    face_EM_label_ = torch.zeros((DATASET_PARAMETERS['batch_size'], emotion_class_num)).scatter_(1, face_emotion_label.type(torch.LongTensor).unsqueeze(1), 1)\n    face_EM_label_ = face_EM_label_.unsqueeze(2).unsqueeze(3).expand(DATASET_PARAMETERS['batch_size'], emotion_class_num, face.size(2), face.size(3))\n    face_EM_label_ = face_EM_label_.cuda()\n    voice_EM_label_ = torch.zeros((DATASET_PARAMETERS['batch_size'], emotion_class_num)).scatter_(1, voice_emotion_label.type(torch.LongTensor).unsqueeze(1), 1)\n    voice_EM_label_ = voice_EM_label_.unsqueeze(2).unsqueeze(3).expand(DATASET_PARAMETERS['batch_size'], emotion_class_num, face.size(2), face.size(3))\n    voice_EM_label_ = voice_EM_label_.cuda()\n\n    fake_face = g_net(embeddings.unsqueeze(2).unsqueeze(3)) # conditional input for G\n\n    \"\"\" --------------------update Discriminators and classifiers-------------------------- \"\"\"\n    f1_optimizer.zero_grad()\n    f2_optimizer.zero_grad()\n    d1_optimizer.zero_grad()\n    d2_optimizer.zero_grad()\n    c1_optimizer.zero_grad()\n    c2_optimizer.zero_grad()\n\n    # ------- real sample scores ------- #\n    real_score_out_1 = d1_net(f1_net(face)) # unconditional input for D1\n    real_score_out_2 = d2_net(f2_net(face)) # unconditional input for D2\n    D1_real_loss= F.binary_cross_entropy(torch.sigmoid(real_score_out_1), real_label) # BCE loss\n    D2_real_loss = F.binary_cross_entropy(torch.sigmoid(real_score_out_2), real_label) # BCE loss\n    D_real_loss = 1*D1_real_loss+0*D2_real_loss\n\n    # ------- generated sample scores ------- #\n    fake_score_out_1 = d1_net(f1_net(fake_face.detach())) # unconditional input for D1\n    fake_score_out_2 = d2_net(f2_net(fake_face.detach())) # unconditional input for D2\n    D1_fake_loss = F.binary_cross_entropy(torch.sigmoid(fake_score_out_1), fake_label)\n    D2_fake_loss = F.binary_cross_entropy(torch.sigmoid(fake_score_out_2), fake_label)\n    D_fake_loss = 1*D1_fake_loss+0*D2_fake_loss\n\n    real_id_label_out = c1_net(f1_net(face)) # compute c1,c2 loss\n    real_emotion_label_out = c2_net(f2_net(face)) # compute c1,c2 loss\n    C1_real_loss = F.nll_loss(F.log_softmax(real_id_label_out, dim=1), face_identity_label)\n    C2_real_loss = F.nll_loss(F.log_softmax(real_emotion_label_out, dim=1), face_emotion_label)\n\n    (D_fake_loss + D_real_loss + 1*C1_real_loss + 1*C2_real_loss).backward()\n\n    f1_optimizer.step()\n    f2_optimizer.step()\n    d1_optimizer.step()\n    d2_optimizer.step()\n    c1_optimizer.step()\n    c2_optimizer.step()\n    # ---------------------------------------------\n    D_real.update(D_real_loss.item())\n    D_fake.update(D_fake_loss.item())\n    C1_real.update(C1_real_loss.item())\n    C2_real.update(C2_real_loss.item())\n    # ---------------------------------------------\n\n    \"\"\" --------------------------------update Generator 
--------------------------------------\"\"\"\n    g_optimizer.zero_grad()\n\n    fake_id_label_out = c1_net(f1_net(fake_face))\n    fake_emotion_label_out = c2_net(f2_net(fake_face))\n    fake_score_out_1 = d1_net(f1_net(fake_face)) # unconditional input for D\n    fake_score_out_2 = d2_net(f2_net(fake_face))\n\n    GD_fake_loss1 = F.binary_cross_entropy(torch.sigmoid(fake_score_out_1), real_label)\n    GD_fake_loss2 = F.binary_cross_entropy(torch.sigmoid(fake_score_out_2), real_label)\n    GD_fake_loss = 1*GD_fake_loss1 +0*GD_fake_loss2\n\n    GC1_fake_loss = F.nll_loss(F.log_softmax(fake_id_label_out, dim=1), voice_identity_label) # replace random labels with the real labels?\n    GC2_fake_loss = F.nll_loss(F.log_softmax(fake_emotion_label_out, dim=1), voice_emotion_label)\n    # loss_G_L1_1 = criterionL1(fake_face, face) * 100\n    # GD_fake_loss = fake_score_out.mul(-1).mean() # hinge loss\n    (GD_fake_loss + 1*GC1_fake_loss + 1*GC2_fake_loss).backward()\n\n    g_optimizer.step()\n\n    # ---------------------------------------------\n    GD_fake.update(GD_fake_loss.item())\n    C1_fake.update(GC1_fake_loss.item())\n    C2_fake.update(GC2_fake_loss.item())\n    batch_time.update(time.time() - start_time)\n    # ---------------------------------------------\n\n    # print status\n    if it % 10 == 0:\n        logger.info([iteration.__str__() + batch_time.__str__() +\n                     D_real.__str__() + D_fake.__str__() + C1_real.__str__() +C2_real.__str__()+C1_fake.__str__()+C2_fake.__str__()+\n                     GD_fake.__str__() ])\n\n        writer.add_scalars('data/scalar_group', {\"D_real\": D_real_loss,\n                                                 \"D_fake\": D_fake_loss,\n                                                 \"C1_real_loss\":C1_real_loss,\n                                                 \"C2_real_loss\":C2_real_loss,\n                                                 \"C1_fake_loss\": GC1_fake_loss,\n                                                 \"C2_fake_loss\": GC2_fake_loss,\n                                                 \"GD_fake_loss\":GD_fake_loss}, it)\n\n        # info = {'image/real_images': real_images(face, 8), 'image/generated_images': generate_img(fake_face, 8)}\n        # writer.add_images('image/generated_images', generate_img(fake_face, 8), it)\n        batch_time.reset()\n        D_real.reset()\n        D_fake.reset()\n        C1_real.reset()\n        C2_real.reset()\n        C1_fake.reset()\n        C2_fake.reset()\n        GD_fake.reset()\n\n    # snapshot\n    if it % 2000 == 0:\n        s_time = time.strftime(\"%m-%d,%H,%M\") + '-' + str(it) + '-'\n        # save_model(e_net, 'models/voice_embedding/{}voice_embedding.pth'.format(s_time))\n        save_model(g_net, 'models/generator/{}generator.pth'.format(s_time))\n        # save_model(d1_net, 'models/discriminator/{}discriminator.pth'.format(s_time))\n        # save_model(f1_net, 'models/face_embedding/{}face_embedding1.pth'.format(s_time))\n        # save_model(f2_net, 'models/face_embedding/{}face_embedding2.pth'.format(s_time))\n\n    iteration.update(1)\n# writer.export_scalars_to_json(\"./models/log/all_scalars.json\")\n# writer.close()\n\n\n", "sub_path": "gan_train_v2.py", "file_name": "gan_train_v2.py", "file_ext": "py", "file_size_in_byte": 11070, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "util.parse_dataset.csv_to_list", "line_number": 15, "usage_type": "call"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 15, "usage_type": "argument"}, {"api_name": "dataset.VoiceDataset", "line_number": 19, "usage_type": "call"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 19, "usage_type": "name"}, {"api_name": "dataset.FaceDataset", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.get_collate_fn", "line_number": 23, "usage_type": "call"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 25, "usage_type": "call"}, {"api_name": 
"util.config.DATASET_PARAMETERS", "line_number": 26, "usage_type": "name"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 30, "usage_type": "call"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 31, "usage_type": "name"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 32, "usage_type": "name"}, {"api_name": "utils.cycle", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.cycle", "line_number": 35, "usage_type": "call"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 38, "usage_type": "name"}, {"api_name": "network.get_network", "line_number": 39, "usage_type": "call"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 39, "usage_type": "argument"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 40, "usage_type": "name"}, {"api_name": "network.get_network", "line_number": 41, "usage_type": "call"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 41, "usage_type": "argument"}, {"api_name": "network.get_network", "line_number": 43, "usage_type": "call"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 43, "usage_type": "argument"}, {"api_name": "network.get_network", "line_number": 44, "usage_type": "call"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 44, "usage_type": "argument"}, {"api_name": "network.get_network", "line_number": 45, "usage_type": "call"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 45, "usage_type": "argument"}, {"api_name": "network.get_network", "line_number": 46, "usage_type": "call"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 46, "usage_type": "argument"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 48, "usage_type": "name"}, {"api_name": "network.get_network", "line_number": 49, "usage_type": "call"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 49, "usage_type": "argument"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 50, "usage_type": "name"}, {"api_name": "network.get_network", "line_number": 51, "usage_type": "call"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 51, "usage_type": "argument"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 56, "usage_type": "name"}, {"api_name": "network.restore_train", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.full", "line_number": 60, "usage_type": "call"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.full", "line_number": 61, "usage_type": "call"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 66, "usage_type": "call"}, {"api_name": "utils.Logger", "line_number": 67, "usage_type": "call"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 67, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.Meter", "line_number": 68, "usage_type": "call"}, {"api_name": "utils.Meter", "line_number": 69, "usage_type": "call"}, {"api_name": "utils.Meter", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.Meter", "line_number": 71, "usage_type": "call"}, {"api_name": 
"utils.Meter", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.Meter", "line_number": 73, "usage_type": "call"}, {"api_name": "utils.Meter", "line_number": 74, "usage_type": "call"}, {"api_name": "utils.Meter", "line_number": 75, "usage_type": "call"}, {"api_name": "utils.Meter", "line_number": 76, "usage_type": "call"}, {"api_name": "utils.Meter", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.L1Loss", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 89, "usage_type": "call"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 89, "usage_type": "name"}, {"api_name": "util.config.NETWORKS_PARAMETERS", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 109, "usage_type": "call"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 109, "usage_type": "attribute"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 112, "usage_type": "call"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 112, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 112, "usage_type": "attribute"}, {"api_name": "util.config.DATASET_PARAMETERS", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 142, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 168, 
"usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 169, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 173, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 173, "usage_type": "call"}, {"api_name": "time.time", "line_number": 184, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 214, "usage_type": "call"}, {"api_name": "utils.save_model", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "597328864", "text": "import asyncio\r\nimport threading\r\nimport signal\r\nimport traceback\r\n\r\nfrom ..client import Client\r\nfrom ..logging import get_logger\r\nfrom ..typing import Iterable, Level, List, Optional\r\n\r\nfrom ..utils import tasks\r\nfrom ..utils._async import shutdown_loop\r\nfrom ..utils.decorators import run_once\r\nfrom ..utils.filters import Filters\r\n\r\n__all__ = (\r\n 'AbstractScanner', 'TimelyLevelScanner', 'RateLevelScanner',\r\n 'thread', 'get_loop', 'set_loop', 'run', 'differ',\r\n 'daily_listener', 'weekly_listener', 'rate_listener', 'unrate_listener',\r\n 'all_listeners'\r\n)\r\n\r\nloop = asyncio.new_event_loop()\r\n\r\nscanner_client = Client(loop=loop)\r\n\r\nlog = get_logger(__name__)\r\n\r\nall_listeners = []\r\n\r\n\r\ndef get_loop() -> asyncio.AbstractEventLoop:\r\n return loop\r\n\r\n\r\ndef set_loop(new_loop: asyncio.AbstractEventLoop) -> None:\r\n global loop\r\n loop = new_loop\r\n\r\n\r\ndef run(loop: asyncio.AbstractEventLoop) -> None:\r\n try:\r\n loop.add_signal_handler(signal.SIGINT, loop.stop)\r\n loop.add_signal_handler(signal.SIGTERM, loop.stop)\r\n\r\n except (NotImplementedError, RuntimeError):\r\n pass\r\n\r\n asyncio.set_event_loop(loop)\r\n\r\n try:\r\n loop.run_forever()\r\n\r\n except KeyboardInterrupt:\r\n log.info('Received the signal to terminate the event loop.')\r\n\r\n finally:\r\n log.info('Cleaning up tasks.')\r\n shutdown_loop(loop)\r\n\r\n\r\ndef update_thread_loop(thread: threading.Thread, loop: asyncio.AbstractEventLoop) -> None:\r\n thread.args = (loop,)\r\n\r\n\r\nthread = threading.Thread(target=run, args=(loop,), name='ScannerThread', daemon=True)\r\n\r\n\r\nclass AbstractScanner:\r\n def __init__(\r\n self, delay: float = 10.0, *,\r\n loop: Optional[asyncio.AbstractEventLoop] = None\r\n ) -> None:\r\n if loop is None:\r\n loop = get_loop()\r\n self.loop = loop\r\n self.runner = tasks.loop(seconds=delay, loop=loop)(self.main)\r\n self.cache = None\r\n self.clients = []\r\n all_listeners.append(self)\r\n\r\n def add_client(self, client: Client) -> None:\r\n \"\"\"Add a client to fire events for.\"\"\"\r\n if client not in self.clients:\r\n self.clients.append(client)\r\n\r\n def attach_to_loop(self, loop: asyncio.AbstractEventLoop) -> None:\r\n \"\"\"Attach the runner to another event loop.\"\"\"\r\n self.runner.loop = loop\r\n self.loop = loop\r\n\r\n def enable(self) -> None:\r\n try:\r\n self.runner.start()\r\n except RuntimeError:\r\n 
pass\r\n\r\n    @run_once\r\n    def close(self, *args, force: bool = True) -> None:\r\n        \"\"\"Accurately shut down a scanner.\r\n        If force is true, cancel the runner, and wait until it finishes otherwise.\r\n        \"\"\"\r\n        if force:\r\n            self.runner.cancel()\r\n        else:\r\n            self.runner.stop()\r\n\r\n    async def on_error(self, exc: Exception) -> None:\r\n        \"\"\"Basic event handler to print the errors if any occur.\"\"\"\r\n        traceback.print_exc()\r\n\r\n    async def scan(self) -> None:\r\n        \"\"\"This function should contain the main code of the scanner.\"\"\"\r\n        pass\r\n\r\n    async def main(self) -> None:\r\n        \"\"\"Main function that does all the work.\"\"\"\r\n        try:\r\n            await self.scan()\r\n\r\n        except Exception as exc:\r\n            await self.on_error(exc)\r\n\r\n\r\nclass TimelyLevelScanner(AbstractScanner):\r\n    def __init__(\r\n        self, t_type: str, delay: float = 10.0, *,\r\n        loop: Optional[asyncio.AbstractEventLoop] = None\r\n    ) -> None:\r\n        super().__init__(delay, loop=loop)\r\n        self.method = getattr(scanner_client, 'get_' + t_type)\r\n        self.call_method = 'new_' + t_type\r\n\r\n    async def scan(self) -> None:\r\n        \"\"\"Scan for either daily or weekly levels.\"\"\"\r\n        timely = await self.method()\r\n\r\n        if self.cache is None:\r\n            self.cache = timely\r\n            return\r\n\r\n        if timely.id != self.cache.id:\r\n            for client in self.clients:\r\n                dispatcher = client.dispatch(self.call_method, timely)\r\n                self.loop.create_task(dispatcher)  # schedule the execution\r\n\r\n        self.cache = timely\r\n\r\n\r\nclass RateLevelScanner(AbstractScanner):\r\n    def __init__(\r\n        self, listen_to_rate: bool = True, delay: float = 10.0,\r\n        *, loop: Optional[asyncio.AbstractEventLoop] = None\r\n    ) -> None:\r\n        super().__init__(delay, loop=loop)\r\n        self.call_method = 'level_rated' if listen_to_rate else 'level_unrated'\r\n        self.filters = Filters(strategy='awarded')\r\n        self.find_new = listen_to_rate\r\n        self.cache = []\r\n\r\n    async def method(self, pages: int = 2) -> List[Level]:\r\n        return await scanner_client.search_levels(filters=self.filters, pages=range(pages))\r\n\r\n    async def scan(self) -> None:\r\n        new = await self.method()\r\n\r\n        if not self.cache:\r\n            self.cache = new\r\n            return\r\n\r\n        difference = differ(self.cache, new, self.find_new)\r\n\r\n        self.cache = new\r\n\r\n        for level in await further_differ(difference, self.find_new):\r\n            for client in self.clients:\r\n                dispatcher = client.dispatch(self.call_method, level)\r\n                self.loop.create_task(dispatcher)\r\n\r\n\r\nasync def further_differ(\r\n    array: Iterable[Level], find_new: bool = True\r\n) -> List[Level]:\r\n    array = list(array)\r\n    updated = await asyncio.gather(*(level.refresh() for level in array))\r\n    final = list()\r\n\r\n    for level, new in zip(array, updated):\r\n        if find_new:\r\n            if new.is_rated() or new.has_coins_verified():\r\n                final.append(new)\r\n        else:\r\n            if new is None:\r\n                final.append(level)\r\n            elif not new.is_rated() and not new.has_coins_verified():\r\n                final.append(new)\r\n\r\n    return final\r\n\r\n\r\ndef differ(before: list, after: list, find_new: bool = True) -> filter:\r\n    a, b = (before, after) if find_new else (after, before)\r\n    return filter(lambda elem: (elem not in a), b)\r\n\r\n\r\ndaily_listener = TimelyLevelScanner('daily')\r\nweekly_listener = TimelyLevelScanner('weekly')\r\n\r\nrate_listener = RateLevelScanner(listen_to_rate=True)\r\nunrate_listener = RateLevelScanner(listen_to_rate=False)\r\n", "sub_path": "gd/events/scanner.py", "file_name": "scanner.py", "file_ext": "py", "file_size_in_byte": 6214, "program_lang": "python", 
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "asyncio.new_event_loop", "line_number": 22, "usage_type": "call"}, {"api_name": "client.Client", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.get_logger", "line_number": 26, "usage_type": "call"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 31, "usage_type": "attribute"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 35, "usage_type": "attribute"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 40, "usage_type": "attribute"}, {"api_name": "signal.SIGINT", "line_number": 42, "usage_type": "attribute"}, {"api_name": "signal.SIGTERM", "line_number": 43, "usage_type": "attribute"}, {"api_name": "asyncio.set_event_loop", "line_number": 48, "usage_type": "call"}, {"api_name": "utils._async.shutdown_loop", "line_number": 58, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 61, "usage_type": "attribute"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 61, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 65, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 71, "usage_type": "name"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 71, "usage_type": "attribute"}, {"api_name": "utils.tasks.loop", "line_number": 76, "usage_type": "call"}, {"api_name": "utils.tasks", "line_number": 76, "usage_type": "name"}, {"api_name": "client.Client", "line_number": 81, "usage_type": "name"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 86, "usage_type": "attribute"}, {"api_name": "utils.decorators.run_once", "line_number": 97, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 109, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 127, "usage_type": "name"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 127, "usage_type": "attribute"}, {"api_name": "client.dispatch", "line_number": 143, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 152, "usage_type": "name"}, {"api_name": "asyncio.AbstractEventLoop", "line_number": 152, "usage_type": "attribute"}, {"api_name": "utils.filters.Filters", "line_number": 156, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 160, "usage_type": "name"}, {"api_name": "typing.Level", "line_number": 160, "usage_type": "name"}, {"api_name": "client.dispatch", "line_number": 176, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 181, "usage_type": "name"}, {"api_name": "typing.Level", "line_number": 181, "usage_type": "name"}, {"api_name": "asyncio.gather", "line_number": 184, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 182, "usage_type": "name"}, {"api_name": "typing.Level", "line_number": 182, "usage_type": "name"}]} +{"seq_id": "256098379", "text": "import os\nimport codecs\nimport yaml\nimport pytest\n\ndef __get_path(file):\n path, _ = os.path.split(__file__)\n return os.path.join(path, file)\n\ndef load_fixture(file):\n path = __get_path(file + \".yml\")\n try:\n with open(path, 'r') as stream:\n return yaml.load(stream)\n except IOError as err:\n pytest.fail(\"Could not find fixture: {}\".format(err), pytrace=False)\n\ndef load_raw_fixture(file):\n path = __get_path(file)\n try:\n with codecs.open(path, \"r\", \"utf-8\") as stream:\n return stream.read()\n except IOError as err:\n pytest.fail(\"Could not find fixture: {}\".format(err), pytrace=False)\n \n\n\n", "sub_path": 
"tests/unit/fixtures/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 669, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.split", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 16, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 21, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "547425916", "text": "import numpy as np\nfrom PIL import Image\nfrom skimage import transform\n\n\nclass Augmentation:\n\n augmented_images = []\n augmented_labels = []\n\n def rotate_images(self, angle, directory, label):\n for image in directory:\n rotated_image = transform.rotate(image, angle=angle, mode='reflect')\n self.augmented_images.append(rotated_image.tolist())\n self.augmented_labels.append(label)\n\n def scale_images(self, scale_factor, boxing, directory, label):\n for image in directory:\n scaled_image = transform.rescale(image, scale=scale_factor)\n scaled_image_as_png = Image.fromarray(scaled_image.astype('uint8'), 'RGB')\n cropped_image = scaled_image_as_png.crop(boxing)\n np_scaled_image = np.array(cropped_image)\n self.augmented_images.append(np_scaled_image.tolist())\n self.augmented_labels.append(label)\n", "sub_path": "src/augmentation.py", "file_name": "augmentation.py", "file_ext": "py", "file_size_in_byte": 917, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "skimage.transform.rotate", "line_number": 13, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 13, "usage_type": "name"}, {"api_name": "skimage.transform.rescale", "line_number": 19, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 19, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 20, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "644277244", "text": "# -*- coding: utf-8 -*-\n'''\nBeacon to fire events at login of users as registered in the wtmp file\n\n.. code-block:: yaml\n\n beacons:\n wtmp: {}\n'''\n\n# Import Python libs\nfrom __future__ import absolute_import\nimport os\nimport struct\n\n# Import 3rd-party libs\nfrom salt.ext.six.moves import range\n\n# Import salt libs\nimport salt.utils\n\n__virtualname__ = 'wtmp'\nWTMP = '/var/log/wtmp'\nFMT = ' divide and conquer: how about the left subtree; how about right subtree\n# how about their relationship.\n# we need to compare left, right subtree level, therefore, there should be a helper\n# to return level and value, and at the same time, we need maintain level during dfs\n\n# BFS: while + queue(deque)\n# we should maintain a leftvalue, and we needn't keep others\n# but just make sure the first value of the level will be updated, others won't\n# AND be careful that, the left value may be 0. 
Therefore, pay attention to \n# the judgement when checking its existence.\n\n\n## Solution:\n# definition of binary tree\nclass TreeNode(object):\n    def __init__(self, val):\n        self.val = val\n        self.left, self.right = None, None\n\nclass Solution(object):\n    # DFS: recursion(divide and conquer)\n    # @params: root of the binary tree\n    # @return: left most value\n    def findBottomLeftValue1(self, root):\n        # corner case\n        if not root:\n            return None\n        _, val = self.dfshelper(root, 0)\n        return val\n    # @params: root of the binary tree; level of current root\n    # @return: left most level and value \n    def dfshelper(self, root, level):\n        # base case\n        if not root:\n            return -1, None\n        if not root.left and not root.right:\n            return level, root.val\n        # divide and conquer\n        llevel, lval = self.dfshelper(root.left, level + 1)\n        rlevel, rval = self.dfshelper(root.right, level + 1)\n        if llevel >= rlevel:\n            return llevel, lval\n        return rlevel, rval\n\n    # BFS: while + queue(deque)\n    def findBottomLeftValue2(self, root):\n        if not root:\n            return None\n        from collections import deque\n        q = deque([root])\n        result = root.val\n        while q:\n            leftval = None\n            qlen = len(q)\n            for i in range(qlen):\n                current = q.popleft()\n                if leftval is None:\n                    leftval = current.val\n                seq = (current.left, current.right)\n                for node in seq:\n                    if node:\n                        q.append(node)\n            result = leftval\n        return result\n\n\n\n", "sub_path": "data_structures/tree/binary_tree/find_bottom_left_value.py", "file_name": "find_bottom_left_value.py", "file_ext": "py", "file_size_in_byte": 2509, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "504075676", "text": "from pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import PDFPageAggregator\nfrom pdfminer.layout import LAParams, LTTextBox, LTTextLine\nfrom tikapp import TikaApp\nimport PyPDF2, subprocess, os, sys\n\nsys.path.append(os.path.dirname(os.getcwd()))\nfrom common.recursive_folders import recursive_folders\n\nclass pdf_to_text():\n    \"\"\"Converts pdf to text with pdfminer\"\"\"\n    def __init__(self):\n        pass\n    \n    def convert_pdfminer(self, fname):\n        fp = open(fname, 'rb')\n        parser = PDFParser(fp)\n        doc = PDFDocument(parser)\n        rsrcmgr = PDFResourceManager()\n        laparams = LAParams()\n        device = PDFPageAggregator(rsrcmgr, laparams=laparams)\n        interpreter = PDFPageInterpreter(rsrcmgr, device)\n        text = ''\n        for page in PDFPage.create_pages(doc):\n            interpreter.process_page(page)\n            layout = device.get_result()\n            for lt_obj in layout:\n                if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n                    text += lt_obj.get_text()\n        return text\n\n    def convert_PyPDF2(self,fname):\n        pdfFileObj = open(fname,'rb')\n        pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n        text = ''\n        for i in range(pdfReader.numPages):\n            pageObj = pdfReader.getPage(i)\n            text += pageObj.extractText() + '\n'\n        return text\n\n    def convert_Tika(self,fname):\n        tika_client = TikaApp(file_jar=os.getcwd()+'/tika-app-1.20.jar')\n        return tika_client.extract_only_content(fname)\n\nif __name__ == '__main__':\n    path = sys.argv[1]\n    p = pdf_to_text()\n    r = recursive_folders()\n    for arq in r.find_files(path):\n        if arq[-3:] == 'pdf' or arq[-3:] == 'doc' or arq[-4:] == 'docx':\n            texto = p.convert_Tika(arq)\n            arq = 
open(arq.replace('pdf','txt').replace('docx','txt').replace('doc','txt'),'w')\n arq.write(texto)\n arq.close()", "sub_path": "common_nlp/pdf_to_text.py", "file_name": "pdf_to_text.py", "file_ext": "py", "file_size_in_byte": 2099, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 10, "usage_type": "call"}, {"api_name": "pdfminer.pdfparser.PDFParser", "line_number": 20, "usage_type": "call"}, {"api_name": "pdfminer.pdfdocument.PDFDocument", "line_number": 21, "usage_type": "call"}, {"api_name": "pdfminer.pdfinterp.PDFResourceManager", "line_number": 22, "usage_type": "call"}, {"api_name": "pdfminer.layout.LAParams", "line_number": 23, "usage_type": "call"}, {"api_name": "pdfminer.converter.PDFPageAggregator", "line_number": 24, "usage_type": "call"}, {"api_name": "pdfminer.pdfinterp.PDFPageInterpreter", "line_number": 25, "usage_type": "call"}, {"api_name": "pdfminer.pdfpage.PDFPage.create_pages", "line_number": 27, "usage_type": "call"}, {"api_name": "pdfminer.pdfpage.PDFPage", "line_number": 27, "usage_type": "name"}, {"api_name": "pdfminer.layout.LTTextBox", "line_number": 31, "usage_type": "argument"}, {"api_name": "pdfminer.layout.LTTextLine", "line_number": 31, "usage_type": "argument"}, {"api_name": "PyPDF2.PdfFileReader", "line_number": 37, "usage_type": "call"}, {"api_name": "tikapp.TikaApp", "line_number": 45, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 49, "usage_type": "attribute"}, {"api_name": "common.recursive_folders.recursive_folders", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "563006305", "text": "from collections import Counter\n\na = int(input())\nb = []\nfor i in range(a):\n c = str(input())\n b.append(c)\nb = sorted(b, reverse=True)\nd = Counter(b).most_common()\nprint('%s %s' % (d[0][0], d[0][1]))\n\n\n\n", "sub_path": "9612 Maximum Word Frequency.py", "file_name": "9612 Maximum Word Frequency.py", "file_ext": "py", "file_size_in_byte": 209, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "collections.Counter", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "633752995", "text": "import numpy as np\nimport torch\nimport torchvision.transforms as trans\nfrom torch.utils.data import DataLoader, random_split\n\n\ndef load_raw(path, latent_filter=None):\n data_zip = np.load(path, allow_pickle=True)\n\n imgs = data_zip['imgs']\n latents_values = data_zip['latents_values']\n latents_classes = data_zip['latents_classes']\n\n if latent_filter is not None:\n idx = latent_filter(latents_values)\n\n imgs = imgs[idx]\n latents_values = latents_classes[idx]\n latents_classes = latents_classes[idx]\n\n imgs = torch.from_numpy(imgs).to(dtype=torch.float32)\n latents_values = torch.from_numpy(latents_values).to(dtype=torch.float32)\n latents_classes = torch.from_numpy(latents_classes).to(dtype=torch.float32)\n\n return imgs, latents_classes, latents_values\n\n\nclass BatchGenerator:\n def __init__(self, data, batch_size, shuffle=True, random_state=None):\n if random_state is None or isinstance(random_state, int):\n 
random_state = np.random.RandomState(random_state)\n\n self.imgs, self.latent_values, self.classes = data\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.random_state = random_state\n\n def __len__(self):\n return len(self.imgs) // self.batch_size\n\n def __iter__(self):\n order = np.arange(len(self.imgs))\n if self.shuffle:\n self.random_state.shuffle(order)\n\n for i in range(len(self)):\n idx = np.arange(i * self.batch_size, (i + 1) * self.batch_size)\n yield self._get_batch(order[idx])\n\n def _get_batch(self, idx):\n imgs = self.imgs[idx]\n latent_values = self.latent_values[idx]\n latent_classes = self.classes[idx]\n\n return imgs, latent_values, latent_classes\n\n\nclass UnsupervisedLoader(BatchGenerator):\n def _get_batch(self, idx):\n imgs, latent_values, latent_classes = super()._get_batch(idx)\n imgs = imgs.reshape(-1, 64 * 64)\n return imgs, imgs\n\n\nclass SupervisedLoader(BatchGenerator):\n def _get_batch(self, idx):\n imgs, latent_values, latent_classes = super()._get_batch(idx)\n imgs = imgs.reshape(-1, 64 * 64)\n return imgs, latent_classes\n\n\nclass SemiSupervisedLoader(BatchGenerator):\n def _get_batch(self, idx):\n imgs, latent_values, latent_classes = super()._get_batch(idx)\n imgs = imgs.reshape(-1, 64 * 64)\n return imgs, (latent_classes, imgs)\n\n\nclass ValidationWrapper:\n def __init__(self, generator, n_batches):\n self.generator = generator\n self.n_batches = n_batches\n\n def __len__(self):\n return self.n_batches\n\n def __iter__(self):\n for i, batch in enumerate(self.generator):\n if i >= self.n_batches:\n break\n yield batch\n\n\ndef get_loader(setting):\n if setting == 'unsupervised':\n return UnsupervisedLoader\n elif setting == 'supervised':\n return SupervisedLoader\n elif setting == 'semi-supervised':\n return SemiSupervisedLoader\n raise ValueError('Unrecognized setting \"{}\"'.format(setting))\n\n\ndef load_dsprites(path, setting, batch_size=32, data_filters=(None, None), \n val_ratio=0.2, shuffle=True, random_state=None):\n train_filter, test_filter = data_filters\n\n train_data = load_raw(path, train_filter)\n test_data = train_data if test_filter is None else load_raw(path,test_filter)\n val_n_batches = np.ceil(len(train_data) * val_ratio / batch_size)\n\n loader = get_loader(setting)\n\n train_data = loader(train_data, batch_size, True, random_state)\n test_data = loader(test_data, batch_size, True, random_state)\n val_data = ValidationWrapper(train_data, val_n_batches)\n\n return train_data, test_data, val_data\n", "sub_path": "src/dataset/dsprites.py", "file_name": "dsprites.py", "file_ext": "py", "file_size_in_byte": 3760, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.load", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.random.RandomState", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 47, 
"usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "377173713", "text": "'''\nbitcoind fork of jmcorgan, branch addrindex-0.9.2:\nhttps://github.com/jmcorgan/bitcoin/tree/addrindex-0.9.2\n'''\nimport logging\nimport binascii\nimport hashlib\nimport json\n\nfrom lib import config, util, util_bitcoin\n\ndef is_multisig(address):\n array = address.split('_')\n return (len(array) > 1)\n\ndef search_raw_transactions(address):\n result = util.call_jsonrpc_api('search_raw_transactions', {'address': address})\n return result['result']\n\ndef get_unspent_txouts(address, return_confirmed=False):\n result = util.call_jsonrpc_api('get_unspent_txouts', {'address': address, 'return_confirmed': return_confirmed})\n return result['result']\n\ndef get_block_count():\n return int(util.bitcoind_rpc('getblockcount', None))\n\ndef check():\n pass\n\ndef getinfo():\n return {\n \"info\": {\n \"blocks\": get_block_count()\n }\n }\n\ndef listunspent(address):\n outputs = get_unspent_txouts(address)\n utxo = []\n for txo in outputs:\n newtxo = {\n 'address': address,\n 'txid': txo['txid'],\n 'vout': txo['vout'],\n 'ts': 0,\n 'scriptPubKey': txo['scriptPubKey'],\n 'amount': float(txo['amount']),\n 'confirmations': txo['confirmations'],\n 'confirmationsFromCache': False\n }\n utxo.append(newtxo)\n return utxo\n\ndef getaddressinfo(address):\n outputs = get_unspent_txouts(address, return_confirmed=True)\n balance = sum(out['amount'] for out in outputs['confirmed'])\n unconfirmed_balance = sum(out['amount'] for out in outputs['all']) - balance\n \n if is_multisig(address):\n array = address.split('_')\n # TODO: filter transactions\n raw_transactions = reversed(search_raw_transactions(array[1:-1][1]))\n else:\n raw_transactions = reversed(search_raw_transactions(address))\n\n transactions = []\n for tx in raw_transactions:\n if 'confirmations' in tx and tx['confirmations'] > 0:\n transactions.append(tx['txid'])\n\n return {\n 'addrStr': address,\n 'balance': balance,\n 'balanceSat': balance * config.UNIT,\n 'unconfirmedBalance': unconfirmed_balance,\n 'unconfirmedBalanceSat': unconfirmed_balance * config.UNIT,\n 'transactions': transactions\n }\n \n return None\n\ndef gettransaction(tx_hash):\n tx = util.bitcoind_rpc('getrawtransaction', [tx_hash, 1])\n valueOut = 0\n for vout in tx['vout']:\n valueOut += vout['value']\n return {\n 'txid': tx_hash,\n 'version': tx['version'],\n 'locktime': tx['locktime'],\n 'confirmations': tx['confirmations'] if 'confirmations' in tx else 0,\n 'blocktime': tx['blocktime'] if 'blocktime' in tx else 0,\n 'blockhash': tx['blockhash'] if 'blockhash' in tx else 0,\n 'time': tx['time'] if 'time' in tx else 0,\n 'valueOut': valueOut,\n 'vin': tx['vin'],\n 'vout': tx['vout']\n }\n return None\n\ndef get_pubkey_from_transactions(address, raw_transactions):\n #for each transaction we got back, extract the vin, pubkey, go through, convert it to binary, and see if it reduces down to the given address\n for tx in raw_transactions:\n #parse the pubkey out of the first sent transaction\n for vin in tx['vin']:\n scriptsig = vin['scriptSig']\n asm = scriptsig['asm'].split(' ')\n pubkey_hex = asm[1]\n try:\n if util_bitcoin.pubkey_to_address(pubkey_hex) == address:\n return pubkey_hex\n except:\n pass\n return None\n\ndef get_pubkey_for_address(address):\n if is_multisig(address):\n array = address.split('_')\n addresses = array[1:-1]\n else:\n addresses = [address]\n \n pubkeys = []\n\n for address in addresses:\n raw_transactions = 
search_raw_transactions(address)\n        pubkey = get_pubkey_from_transactions(address, raw_transactions)\n        if pubkey: pubkeys.append(pubkey)\n\n    return pubkeys\n", "sub_path": "lib/blockchain.py", "file_name": "blockchain.py", "file_ext": "py", "file_size_in_byte": 4016, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "lib.util.call_jsonrpc_api", "line_number": 17, "usage_type": "call"}, {"api_name": "lib.util", "line_number": 17, "usage_type": "name"}, {"api_name": "lib.util.call_jsonrpc_api", "line_number": 21, "usage_type": "call"}, {"api_name": "lib.util", "line_number": 21, "usage_type": "name"}, {"api_name": "lib.util.bitcoind_rpc", "line_number": 25, "usage_type": "call"}, {"api_name": "lib.util", "line_number": 25, "usage_type": "name"}, {"api_name": "lib.config.UNIT", "line_number": 74, "usage_type": "attribute"}, {"api_name": "lib.config", "line_number": 74, "usage_type": "name"}, {"api_name": "lib.config.UNIT", "line_number": 76, "usage_type": "attribute"}, {"api_name": "lib.config", "line_number": 76, "usage_type": "name"}, {"api_name": "lib.util.bitcoind_rpc", "line_number": 83, "usage_type": "call"}, {"api_name": "lib.util", "line_number": 83, "usage_type": "name"}, {"api_name": "lib.util_bitcoin.pubkey_to_address", "line_number": 110, "usage_type": "call"}, {"api_name": "lib.util_bitcoin", "line_number": 110, "usage_type": "name"}]} +{"seq_id": "520507036", "text": "import re\nimport sqlite3\nimport json\n\nFILE_QandQ_NAME = 'law-Q&A-001'\n\nquestionPattern = re.compile(\"(.+?)\\(A\\)(.+?)\\(B\\)(.+?)\\(C\\)(.+?)\\(D\\)(.+)\")\n\nwith open(FILE_QandQ_NAME + '.txt', 'r') as f:\n    # f.write(docText)\n    i = 1\n    questionsList = []\n    answers = dict(json.loads(f.readline()))\n    answers = {str(int(k)): v for k, v in answers.items()}\n    answers['200'] = 'A'\n    print(answers)\n    for q in f:\n        # print(q)\n        q = questionPattern.search(q).groups()\n        q = [part.strip() for part in q]\n        # print(len(q))\n        questionsList.append((q[0], q[1], q[2], q[3], q[4], answers[str(i)]))\n        i += 1\n\nconn = sqlite3.connect('ncbexQandA.db')\nc = conn.cursor()\n# Create table\ntableName = '`NBE SAMPLE TEST ' + FILE_QandQ_NAME + '`'\ncreateColumns = 'id integer primary key, question text, propA text, propB text, propC text, propD text, answer text'\ninsertColumns = 'question, propA, propB, propC, propD, answer'\ncommand = 'CREATE TABLE {} ({})'.format(tableName, createColumns)\nc.execute('DROP TABLE IF EXISTS {}'.format(tableName))\nc.execute(command)\n# Insert a row of data\nc.executemany("INSERT INTO {} ({}) VALUES (?,?,?,?,?,?)".format(tableName, insertColumns), questionsList)\n# Save (commit) the changes\nconn.commit()\n# We can also close the connection if we are done with it.\n# Just be sure any changes have been committed or they will be lost.\nconn.close()\n", "sub_path": "write-in-db.py", "file_name": "write-in-db.py", "file_ext": "py", "file_size_in_byte": 1394, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "re.compile", "line_number": 7, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "235864898", "text": "# -*- coding: utf-8 -*-\r\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\n\nimport base64\nimport contextlib\nimport io\n\nfrom odoo import api, fields, models, tools, _\nfrom odoo.tools.misc import xlwt\n\nclass QuantValuationExport(models.TransientModel):\n _name = \"quant.valuation.export\"\n\n name = fields.Char('File Name', readonly=True)\n data = fields.Binary('File', readonly=True)\n state = fields.Selection([('choose', 'choose'), ('get', 'get')], default='choose')\n\n @api.multi\n def _get_headers(self):\n return [\n 'Producto',\n 'Tienda',\n 'Cantidad',\n 'Valor',\n ]\n \n @api.multi\n def act_getfile(self):\n this = self[0]\n domain = [('product_id.type', '=', 'product'),('location_id.usage', '=', 'internal')]\n quant_data = self.env['stock.quant'].read_group(domain,fields=['product_id','location_id','quantity'], groupby=['product_id','location_id'])\n locations = self.env['stock.location'].search([('usage', '=', 'internal')])\n print (locations)\n location_qty = {'product_id': '', 'quantity': 0.0, 'inventory_value': 0.0}\n location_header = {}\n for location in locations:\n warehouse = self.env['stock.warehouse'].search([('lot_stock_id', '=', location.id)], limit=1)\n location_qty.setdefault(location.id, 0.0)\n location_header.setdefault(location.id, warehouse.name if warehouse else location.name)\n quant_result = []\n for qd in quant_data:\n product = self.env['product.product'].browse(qd['product_id'][0])\n quant_item = location_qty.copy()\n quant_item.update({'product_id': qd['product_id'][1], 'quantity': qd['quantity'], 'inventory_value': product.stock_value})\n if '__domain' in qd:\n quant_item.update({'product_id': qd['product_id'][1], 'inventory_value': product.stock_value})\n quant_location = self.env['stock.quant'].read_group(qd['__domain'],fields=['location_id','quantity'], groupby=['location_id'])\n for ql in quant_location:\n quant_item.update({ql['location_id'][0]: ql['quantity']})\n quant_result.append(quant_item)\n with contextlib.closing(io.BytesIO()) as buf:\n workbook = xlwt.Workbook()\n worksheet = workbook.add_sheet('Reporte Valoración Inventario')\n header_bold = xlwt.easyxf(\"font: bold on; pattern: pattern solid, fore_colour gray25;\")\n\n worksheet.write(0,0,'Producto',header_bold)\n key_locations = []\n col = 1\n for key,value in location_header.items():\n worksheet.write(0,col,value,header_bold)\n key_locations.append(key)\n col += 1\n worksheet.write(0,col,'Cantidad Total',header_bold)\n worksheet.write(0,col + 1,'Valor',header_bold)\n\n row = 1\n col = 1\n total_value = 0.0\n for qr in quant_result:\n cell_num = xlwt.easyxf(num_format_str=\"#,##0.00\")\n worksheet.write(row,0,qr['product_id'])\n col = 1\n for key_location in key_locations:\n worksheet.write(row,col,qr[key_location],cell_num)\n col += 1\n worksheet.write(row,col,qr['quantity'],cell_num)\n worksheet.write(row,col + 1,qr['inventory_value'],cell_num)\n total_value += qr['inventory_value']\n row += 1\n cell_num = xlwt.easyxf(\"font: bold on; pattern: pattern solid, fore_colour gray25;\", num_format_str=\"#,##0.00\")\n worksheet.write(row,col,\"Total:\",cell_num)\n worksheet.write(row,col + 1,total_value,cell_num)\n\n workbook.save(buf)\n out = base64.encodestring(buf.getvalue())\n\n filename = 'Reporte valor de Inventario'\n extension = 'xls'\n name = \"%s.%s\" % (filename, extension)\n this.write({'state': 'get', 'data': out, 'name': name})\n return {\n 'type': 'ir.actions.act_window',\n 'res_model': 'quant.valuation.export',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_id': this.id,\n 'views': 
[(False, 'form')],\n 'target': 'new',\n }\n", "sub_path": "sale_premiumpaint/wizard/export_quant_line.py", "file_name": "export_quant_line.py", "file_ext": "py", "file_size_in_byte": 4361, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "odoo.models.TransientModel", "line_number": 11, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 11, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 14, "usage_type": "name"}, {"api_name": "odoo.fields.Binary", "line_number": 15, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 15, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 16, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 16, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 18, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 18, "usage_type": "name"}, {"api_name": "contextlib.closing", "line_number": 51, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 51, "usage_type": "call"}, {"api_name": "odoo.tools.misc.xlwt.Workbook", "line_number": 52, "usage_type": "call"}, {"api_name": "odoo.tools.misc.xlwt", "line_number": 52, "usage_type": "name"}, {"api_name": "odoo.tools.misc.xlwt.easyxf", "line_number": 54, "usage_type": "call"}, {"api_name": "odoo.tools.misc.xlwt", "line_number": 54, "usage_type": "name"}, {"api_name": "odoo.tools.misc.xlwt.easyxf", "line_number": 70, "usage_type": "call"}, {"api_name": "odoo.tools.misc.xlwt", "line_number": 70, "usage_type": "name"}, {"api_name": "odoo.tools.misc.xlwt.easyxf", "line_number": 80, "usage_type": "call"}, {"api_name": "odoo.tools.misc.xlwt", "line_number": 80, "usage_type": "name"}, {"api_name": "base64.encodestring", "line_number": 85, "usage_type": "call"}, {"api_name": "odoo.api.multi", "line_number": 27, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "366011153", "text": "import cv2\nimport numpy as np\nimport face_recognition\nimport os\nimport pickle\nfrom PIL import Image, ImageDraw, ImageFont\nfrom constant import RESIZE_SCALE\nimport DatabaseUtils\nfrom datetime import datetime\n\nFONT_PATH = './arial.ttf'\nfont = ImageFont.truetype(FONT_PATH, 32)\ndef attendance(name):\n with open('attendance.csv', 'r+') as f:\n myDataList = f.readlines()\n nameList = []\n for line in myDataList:\n entry = line.split(',')\n nameList.append(entry[0])\n if name not in nameList:\n now = datetime.now()\n datetimeString = now.strftime('%H:%M:%S')\n # f.writelines(f'\\n{name}, {datetimeString}')\n\nfacesData = DatabaseUtils.getAllFaceData()\n# print(facesData[1].StudentID)\n# print(pickle.loads(facesData[1].EncodedImage))\n\n\nvideo_capture = cv2.VideoCapture(0)\n\nwhile True:\n success, img = video_capture.read()\n resizedImg = cv2.resize(img, (0, 0), None, 1 / RESIZE_SCALE, 1 / RESIZE_SCALE)\n resizedImg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n facesCurrentLocation = face_recognition.face_locations(resizedImg)\n encodedCurrentFaces = face_recognition.face_encodings(resizedImg, facesCurrentLocation)\n for currentFace, location in zip(encodedCurrentFaces, facesCurrentLocation):\n minDistance = 0\n id = \"\"\n name = \"Unknown\"\n # knownFace = pickle.loads(facesData[0].EncodedImage)\n # test = face_recognition.compare_faces([knownFace], encodedCurrentFaces[0])\n # print(test)\n for i, face in 
enumerate(facesData):\r\n            faceEncodedData = pickle.loads(face.EncodedImage)\r\n            if face_recognition.compare_faces([faceEncodedData], currentFace)[0]:\r\n                print('run')\r\n                faceDistance = face_recognition.face_distance([faceEncodedData], currentFace)\r\n                if i == 0:\r\n                    minDistance = faceDistance\r\n                    id = face.StudentID\r\n                elif faceDistance < minDistance:\r\n                    minDistance = faceDistance\r\n                    id = face.StudentID\r\n        y1, x2, y2, x1 = location\r\n        cv2.rectangle(img, (x1 - 5, y1 - 5), (x2 + 5, y2 + 5), (0, 255, 0), 2)\r\n        if id != \"\" and minDistance < 0.5:\r\n            studentData = DatabaseUtils.getStudentNameById(id)\r\n            # cv2.rectangle(img, (x1, y2-35), (x2, y2), (0,255,0), cv2.FILLED)\r\n\r\n            name = studentData.FullName + \" - \" + id\r\n            attendance(studentData.FullName)\r\n        imagePIL = Image.fromarray(img)\r\n        draw = ImageDraw.Draw(imagePIL)\r\n        draw.text((x1, y2 + 25), name, font=font, fill=(255, 255, 255, 0), )\r\n        img = np.array(imagePIL)\r\n    cv2.imshow(\"Cam\", img)\r\n    if cv2.waitKey(1) == ord('q'):\r\n        break\r\nvideo_capture.release()\r\ncv2.destroyAllWindows()", "sub_path": "AttendanceTest.py", "file_name": "AttendanceTest.py", "file_ext": "py", "file_size_in_byte": 2783, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "PIL.ImageFont.truetype", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 12, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "DatabaseUtils.getAllFaceData", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 34, "usage_type": "call"}, {"api_name": "constant.RESIZE_SCALE", "line_number": 34, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 35, "usage_type": "attribute"}, {"api_name": "face_recognition.face_locations", "line_number": 37, "usage_type": "call"}, {"api_name": "face_recognition.face_encodings", "line_number": 38, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 47, "usage_type": "call"}, {"api_name": "face_recognition.compare_faces", "line_number": 48, "usage_type": "call"}, {"api_name": "face_recognition.face_distance", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 58, "usage_type": "call"}, {"api_name": "DatabaseUtils.getStudentNameById", "line_number": 60, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 65, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "584263844", "text": "import sys\n\ntry:\n    import boto3\n\n    print(\"imported boto3 successfully\")\nexcept Exception as e:\n    print(e)\n    sys.exit(1)\nsource_region = 'eu-west-1'\ndestination_region = 'eu-west-2'\nsession = boto3.session.Session(profile_name='cooper')\nec2_source_client = session.client(service_name='ec2', 
region_name=source_region)\nsts_client = session.client(service_name='sts', region_name=source_region)\naccount_id = sts_client.get_caller_identity().get('Account')\nprint(account_id)\nall_snaps = []\nf_bkp = {\"Name\": \"tag:env\", \"Values\": [\"dev\"]}\nfor each_snap in ec2_source_client.describe_snapshots(OwnerIds=[account_id], Filters=[f_bkp]).get('Snapshots'):\n    all_snaps.append(each_snap.get('SnapshotId'))\n    print(all_snaps)\n\nec2_des_client = session.client(service_name='ec2', region_name=destination_region)\nfor each_source_snap in all_snaps:\n    print(\"taking backup of snapshot {} into {}\".format(each_source_snap, destination_region))\n    ec2_des_client.copy_snapshot(\n        Description=\"Disaster recovery\",\n        SourceRegion=source_region,\n        DestinationRegion=destination_region,\n        SourceSnapshotId=each_source_snap,\n    )\nprint(\"EBS snapshot copy to destination region completed\")\nprint(\"Modifying tags\")\nfor each_source_snap in all_snaps:\n    ec2_source_client.delete_tags(\n        Resources=[each_source_snap],\n        Tags=[\n            {\n                'Key': 'env',\n                'Value': 'dev'\n            }\n        ]\n    )\n    print(\"Creating new tags for {}\".format(each_source_snap))\n    ec2_source_client.create_tags(\n        Resources=[each_source_snap],\n        Tags=[\n            {\n                'Key': 'env',\n                'Value': 'dev-copy'\n            }\n        ]\n    )\n", "sub_path": "lambda/cross-region-ebs-auto-snap-copy.py", "file_name": "cross-region-ebs-auto-snap-copy.py", "file_ext": "py", "file_size_in_byte": 1712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sys.exit", "line_number": 9, "usage_type": "call"}, {"api_name": "boto3.session.Session", "line_number": 12, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 12, "usage_type": "attribute"}]} +{"seq_id": "111596587", "text": "import requests\n\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic import FormView, TemplateView\n\nfrom .forms import TokenForm\nfrom .models import Project\n\n\nclass HomeView(TemplateView):\n    template_name = \"project_admin/home.html\"\n\n    def get(self, request, *args, **kwargs):\n        token = None\n        self.member_data = None\n\n        if 'master_access_token' in request.session:\n            token = request.session['master_access_token']\n            self.member_data = self.token_for_memberlist(token)\n            if not self.member_data:\n                del request.session['master_access_token']\n\n        if self.member_data:\n            return super().get(request, *args, **kwargs)\n        else:\n            return redirect('login')\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['member_data'] = self.member_data\n        return context\n\n    def token_for_memberlist(self, token):\n        req_url = ('https://www.openhumans.org/api/direct-sharing/project/'\n                   'members/?access_token={}'.format(token))\n        req = requests.get(req_url)\n        if req.status_code == 200:\n            return req.json()\n        else:\n            messages.error(self.request, 'Token not valid. 
Maybe a fresh one is needed?')\n return None\n\n\nclass LoginView(FormView):\n template_name = 'project_admin/login.html'\n form_class = TokenForm\n success_url = reverse_lazy('home')\n\n def form_valid(self, form):\n token = form.cleaned_data['token']\n req_url = (\"https://www.openhumans.org/api/direct-sharing/project/?access_token={}\".format(token))\n params = {'token': token}\n r = requests.get(req_url, params=params).json()\n try:\n Project.objects.update_or_create(id=r['id'], defaults=r)\n self.request.session['master_access_token'] = token\n except Exception as e:\n # Handle expired master tokens, or serve error message\n if 'Expired token' in r['detail']:\n messages.error(self.request, 'Token has expired. Refresh your token in the project management interface.')\n else:\n messages.error(self.request, e)\n \n return redirect('home')\n", "sub_path": "project_admin/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2345, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.views.generic.TemplateView", "line_number": 12, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 42, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 42, "usage_type": "name"}, {"api_name": "django.views.generic.FormView", "line_number": 46, "usage_type": "name"}, {"api_name": "forms.TokenForm", "line_number": 48, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 55, "usage_type": "call"}, {"api_name": "models.Project.objects.update_or_create", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Project.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.Project", "line_number": 57, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 62, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 64, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "559513399", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n####\n# 10/2010 Bernd Schlapsi \n#\n# This script is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# gPodder is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n#\n# Dependencies:\n# * python-eyed3 (eyeD3 python library - http://eyed3.nicfit.net/)\n# * steghide (steganography program - http://steghide.sourceforge.net/)\n#\n# The script extract the shownotes from the \"Tin Foil Hat\" podcast\n# You can find the instructions how to extract shownotes for the\n# \"Tin Foil Hat\" podcast here:\n# http://cafeninja.blogspot.com/2010/10/tin-foil-hat-show-episode-001.html\n\nimport gpodder\nimport os\nimport shlex\nimport subprocess\nimport tempfile\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ntry:\n import eyeD3\nexcept:\n logger.error( '(tfh shownotes hook) Could not find eyeD3')\n\n\nTFH_URL='http://feeds.feedburner.com/TinFoilHat'\nSTEGHIDE_CMD='steghide extract -f -p %(pwd)s -sf %(img)s -xf %(file)s'\n\n\ndef extract_image(filename):\n \"\"\"extract image from the podcast file\"\"\"\n imagefile = None\n try:\n if eyeD3.isMp3File(filename):\n tag = eyeD3.Mp3AudioFile(filename).getTag()\n images = tag.getImages()\n if images:\n tempdir = tempfile.gettempdir()\n img = images[0]\n imagefile = img.getDefaultFileName()\n img.writeFile(path=tempdir, name=imagefile)\n imagefile = \"%s/%s\" % (tempdir, imagefile)\n else:\n logger.info(u'No image found in %s' % filename)\n except:\n pass\n\n return imagefile\n\n\ndef extract_shownotes(imagefile, remove_image=True):\n \"\"\"extract shownotes from the FRONT_COVER.jpeg\"\"\"\n shownotes = None\n password = 'tinfoilhat'\n shownotes_file = '/tmp/shownotes.txt'\n\n if not os.path.exists(imagefile):\n return shownotes\n\n cmd = STEGHIDE_CMD % {\n 'pwd': password,\n 'img': imagefile,\n 'file': shownotes_file\n }\n myprocess = subprocess.Popen(shlex.split(cmd),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (stdout, stderr) = myprocess.communicate()\n\n if remove_image:\n os.remove(imagefile)\n\n if myprocess.returncode == 0:\n #read shownote file\n f = open(shownotes_file, 'r')\n shownotes = unicode(f.read(), \"utf-8\")\n f.close()\n else:\n logger.error(u'Error extracting shownotes from the image file %s' % imagefile)\n\n return shownotes\n\n\nclass gPodderHooks(object):\n def __init__(self):\n logger.info('\"Tin Foil Hat\" shownote extractor extension is initializing.')\n\n def on_episode_downloaded(self, episode):\n if episode.channel.url == TFH_URL:\n filename = episode.local_filename(create=False, check_only=True)\n if filename is None:\n return\n \n imagefile = extract_image(filename)\n if imagefile is None:\n return\n\n shownotes = extract_shownotes(imagefile)\n if shownotes is None:\n return\n\n # save shownotes in the database\n if episode.description.find(shownotes) == -1:\n episode.description = \"%s\\n\\n
%s
\" % (episode.description, shownotes)\n episode.save()\n episode.db.commit()\n logger.info(u'updated shownotes for podcast: (%s/%s)' % (episode.channel.title, episode.title))\n", "sub_path": "tfh_shownotes_hook.py", "file_name": "tfh_shownotes_hook.py", "file_ext": "py", "file_size_in_byte": 3937, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 35, "usage_type": "call"}, {"api_name": "eyeD3.isMp3File", "line_number": 51, "usage_type": "call"}, {"api_name": "eyeD3.Mp3AudioFile", "line_number": 52, "usage_type": "call"}, {"api_name": "tempfile.gettempdir", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 82, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 82, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "539207314", "text": "import tensorflow as tf\nimport numpy as np\nimport os\nimport sys\nimport random\nimport subprocess\nfrom redis import Redis\nimport time\nsys.path.append(os.path.realpath(\"..\"))\n\nimport helpers.utils as hlp\nfrom models.feed_forward import FFContinuous\n\n\nclass TRPOContinuousTrainer(FFContinuous):\n def __init__(self, sess, args):\n FFContinuous.__init__(self, sess, args)\n self.sess = sess\n self.config = args['config']\n self.env = args['environment']\n self.timesteps_per_launch = args['max_pathlength']\n self.n_workers = args['n_workers']\n self.distributed = args['distributed']\n self.timesteps_per_batch = args['timesteps_batch']\n self.n_tests = args['n_tests']\n self.max_kl = args['max_kl']\n self.normalize = args['normalize']\n self.scale = args['scale']\n self.gamma = args['gamma']\n self.value_updates = args['value_updates']\n self.save_every = args.get('save_every', 1)\n self.sums = self.sumsqrs = self.sumtime = 0\n self.timestep = 0\n self.std = args['std']\n self.create_internal()\n self.init_weights()\n self.train_scores = []\n self.test_scores = []\n np.set_printoptions(precision=6)\n\n # Worker parameters:\n self.id_worker = args['id_worker']\n self.test_mode = args['test_mode']\n\n def create_internal(self):\n self.targets = {\n \"advantage\": tf.placeholder(dtype=tf.float32, shape=[None]),\n \"return\": tf.placeholder(dtype=tf.float32, shape=[None]),\n \"action\": tf.placeholder(dtype=tf.float32, shape=[None, len(self.n_actions)]),\n \"old_mean\": tf.placeholder(dtype=tf.float32, shape=[None, len(self.n_actions)]),\n \"old_std\": tf.placeholder(dtype=tf.float32, shape=[None, len(self.n_actions)]),\n \"flat_tangent\": tf.placeholder(dtype=tf.float32, shape=[None])\n }\n actions = self.targets[\"action\"]\n action_means = self.action_means\n action_stds = self.action_stds\n old_action_means = self.targets[\"old_mean\"]\n old_action_stds = self.targets[\"old_std\"]\n\n log_p = tf.reduce_sum(- 0.5 * tf.square((actions - action_means) / action_stds) \\\n - 0.5 * tf.log(2 * np.pi) - tf.log(action_stds), axis=1)\n\n log_old_p = tf.reduce_sum(- 0.5 * tf.square((actions - old_action_means) / old_action_stds) \\\n - 0.5 * tf.log(2 * np.pi) - tf.log(old_action_stds), axis=1)\n\n ratio_n = tf.exp(log_p - log_old_p)\n self.loss = -tf.reduce_mean(ratio_n * self.targets[\"advantage\"])\n\n def KL_gauss(mean1, std1, 
mean2, std2):\n return - 0.5 * len(self.n_actions) + tf.reduce_mean(tf.reduce_sum(\n tf.log(std2) - tf.log(std1) + (tf.square(std1) + tf.square(mean1 - mean2)) / (2 * tf.square(std2)),\n axis=1))\n\n self.KL = KL_gauss(old_action_means, old_action_stds, action_means, action_stds)\n\n self.policy_grad = hlp.flatgrad(self.loss, self.weights)\n fixed_means = tf.stop_gradient(action_means)\n fixed_stds = tf.stop_gradient(action_stds)\n KL_firstfixed = KL_gauss(fixed_means, fixed_stds, action_means, action_stds)\n kl_ff_grads = tf.gradients(KL_firstfixed, self.weights)\n w_shapes = list(map(hlp.var_shape, self.weights))\n start = 0\n tangents = []\n for shape in w_shapes:\n size = np.prod(shape)\n param = tf.reshape(self.targets[\"flat_tangent\"][start:(start + size)], shape)\n tangents.append(param)\n start += size\n gvp = [tf.reduce_sum(g * t) for (g, t) in zip(kl_ff_grads, tangents)]\n self.fisher_vector_product = hlp.flatgrad(gvp, self.weights)\n\n self.get_flat = hlp.GetFlat(self.weights, self.sess)\n self.set_from_flat = hlp.SetFromFlat(self.weights, self.sess)\n\n value_loss = tf.reduce_mean((self.targets[\"return\"] - self.value) ** 2)\n\n self.value_train_op = tf.train.AdamOptimizer(0.05).minimize(value_loss, var_list=self.value_weights)\n\n def save(self, name):\n directory = 'saves/' + name + '/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n directory += 'iteration_{}'.format(self.timestep) + '/'\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n for i, tensor in enumerate(tf.global_variables()):\n value = self.sess.run(tensor)\n np.save(directory + 'weight_{}'.format(i), value)\n\n if self.scale != 'off':\n np.save(directory + 'sums', self.sums)\n np.save(directory + 'sumsquares', self.sumsqrs)\n np.save(directory + 'sumtime', self.sumtime)\n\n np.save(directory + 'timestep', np.array([self.timestep]))\n np.save(directory + 'train_scores', np.array(self.train_scores))\n np.save(directory + 'test_scores', np.array(self.test_scores))\n print(\"Agent successfully saved in folder {}\".format(directory))\n\n def load(self, name, iteration=None):\n try:\n directory = 'saves/' + name + '/'\n if not os.path.exists(directory):\n print('That directory does not exist!')\n raise Exception\n if iteration is None:\n iteration = np.max([int(x[10:]) for x in [dir for dir in os.walk(directory)][0][1]])\n directory += 'iteration_{}'.format(iteration) + '/'\n\n for i, tensor in enumerate(tf.global_variables()):\n arr = np.load(directory + 'weight_{}.npy'.format(i))\n self.sess.run(tensor.assign(arr))\n\n if self.scale != 'off':\n self.sums = np.load(directory + 'sums.npy')\n self.sumsqrs = np.load(directory + 'sumsquares.npy')\n self.sumtime = np.load(directory + 'sumtime.npy')\n\n self.timestep = np.load(directory + 'timestep.npy')[0]\n self.train_scores = np.load(directory + 'train_scores.npy').tolist()\n self.test_scores = np.load(directory + 'test_scores.npy').tolist()\n print(\"Agent successfully loaded from folder {}\".format(directory))\n except:\n print(\"Something is wrong, loading failed\")\n\n def init_weights(self):\n self.sess.run(tf.global_variables_initializer())\n init_weights = [self.sess.run(w) for w in self.weights]\n if self.std == \"Param\":\n for i in range(len(init_weights))[-len(self.n_actions):]:\n init_weights[i] /= 10.\n if self.std == \"Train\":\n for i in range(len(init_weights))[-2*len(self.n_actions)::2]:\n init_weights[i] /= 10.\n\n self.set_weights(init_weights)\n\n def make_rollout(self):\n variables_server = Redis(port=12000)\n if 
self.scale != 'off':\n try:\n means = hlp.load_object(variables_server.get(\"means\"))\n stds = hlp.load_object(variables_server.get(\"stds\"))\n self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))\n except:\n pass\n try:\n weights = [hlp.load_object(variables_server.get(\"weight_{}\".format(i))) for i in\n range(len(self.weights))]\n self.set_weights(weights)\n except:\n pass\n env = self.env\n if self.test_mode:\n n_tasks = self.n_tests\n timesteps_per_worker = 100000000\n else:\n n_tasks = 10000\n timesteps_per_worker = self.timesteps_per_batch // self.n_workers\n\n timestep = 0\n i_task = 0\n\n paths = []\n while timestep < timesteps_per_worker and i_task < n_tasks:\n path = {}\n observations, action_tuples, rewards, dist_tuples, timestamps = [], [], [], [], []\n sums = np.zeros((1, env.get_observation_space()))\n sumsqrs = np.zeros(sums.shape)\n\n env.reset()\n while not env.done and env.timestamp < self.timesteps_per_launch:\n sums += env.features\n sumsqrs += np.square(env.features)\n observations.append(env.features[0])\n timestamps.append(env.timestamp)\n\n if not self.test_mode:\n actions, dist_tuple = self.act(env.features, return_dists=True)\n dist_tuples.append(dist_tuple)\n else:\n actions = self.act(env.features, exploration=False)\n env.step(actions)\n timestep += 1\n\n action_tuples.append(actions)\n rewards.append(env.reward)\n\n path[\"observations\"] = np.array(observations)\n path[\"action_tuples\"] = np.array(action_tuples)\n path[\"rewards\"] = np.array(rewards)\n if not self.test_mode:\n path[\"dist_tuples\"] = np.array(dist_tuples)\n path[\"timestamps\"] = np.array(timestamps)\n path[\"sumobs\"] = sums\n path[\"sumsqrobs\"] = sumsqrs\n path[\"terminated\"] = env.done\n path[\"total\"] = env.get_total_reward()\n paths.append(path)\n i_task += 1\n\n if self.distributed:\n variables_server.set(\"paths_{}\".format(self.id_worker), hlp.dump_object(paths))\n else:\n self.paths = paths\n\n def train(self):\n cmd_server = 'redis-server --port 12000'\n p = subprocess.Popen(cmd_server, shell=True, preexec_fn=os.setsid)\n self.variables_server = Redis(port=12000)\n means = \"-\"\n stds = \"-\"\n if self.scale != 'off':\n if self.timestep == 0:\n print(\"Time to measure features!\")\n if self.distributed:\n worker_args = \\\n {\n 'config': self.config,\n 'test_mode': False,\n }\n hlp.launch_workers(worker_args, self.n_workers)\n paths = []\n for i in range(self.n_workers):\n paths += hlp.load_object(self.variables_server.get(\"paths_{}\".format(i)))\n else:\n self.test_mode = False\n self.make_rollout()\n paths = self.paths\n\n for path in paths:\n self.sums += path[\"sumobs\"]\n self.sumsqrs += path[\"sumsqrobs\"]\n self.sumtime += path[\"observations\"].shape[0]\n\n stds = np.sqrt((self.sumsqrs - np.square(self.sums) / self.sumtime) / (self.sumtime - 1))\n means = self.sums / self.sumtime\n print(\"Init means: {}\".format(means))\n print(\"Init stds: {}\".format(stds))\n self.variables_server.set(\"means\", hlp.dump_object(means))\n self.variables_server.set(\"stds\", hlp.dump_object(stds))\n self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))\n while True:\n print(\"Iteration {}\".format(self.timestep))\n start_time = time.time()\n\n if self.distributed:\n weights = self.get_weights()\n for i, weight in enumerate(weights):\n self.variables_server.set(\"weight_\" + str(i), hlp.dump_object(weight))\n worker_args = \\\n {\n 'config': self.config,\n 'test_mode': False,\n }\n hlp.launch_workers(worker_args, 
self.n_workers)\n paths = []\n for i in range(self.n_workers):\n paths += hlp.load_object(self.variables_server.get(\"paths_{}\".format(i)))\n else:\n self.test_mode = False\n self.make_rollout()\n paths = self.paths\n\n observations = np.concatenate([path[\"observations\"] for path in paths])\n actions = np.concatenate([path[\"action_tuples\"] for path in paths])\n action_means = []\n action_stds = []\n returns = []\n advantages = []\n for path in paths:\n self.sums += path[\"sumobs\"]\n self.sumsqrs += path[\"sumsqrobs\"]\n self.sumtime += path[\"rewards\"].shape[0]\n action_means += [d[0] for d in path[\"dist_tuples\"]]\n action_stds += [d[1] for d in path[\"dist_tuples\"]]\n returns += hlp.discount(path[\"rewards\"], self.gamma, path[\"timestamps\"]).tolist()\n values = self.sess.run(self.value, feed_dict={self.state_input: path[\"observations\"]})\n values = np.append(values, 0 if path[\"terminated\"] else values[-1])\n deltas = (path[\"rewards\"] + self.gamma * values[1:] - values[:-1])\n advantages += hlp.discount(deltas, self.gamma, path[\"timestamps\"]).tolist()\n returns = np.array(returns)\n advantages = np.array(advantages)\n action_means = np.array(action_means)\n action_stds = np.array(action_stds)\n\n if self.normalize == 'ranks':\n ranks = np.zeros_like(advantages)\n ranks[np.argsort(advantages)] = np.arange(ranks.shape[0], dtype=np.float32) / (ranks.shape[0] - 1)\n ranks -= 0.5\n advantages = ranks[:]\n elif self.normalize == 'center':\n advantages -= np.mean(advantages)\n advantages /= (np.std(advantages, ddof=1) + 0.001)\n\n feed_dict = {self.state_input: observations,\n self.targets[\"return\"]: returns,\n self.targets[\"advantage\"]: advantages,\n self.targets[\"old_mean\"]: action_means,\n self.targets[\"old_std\"]: action_stds,\n self.targets[\"action\"]: actions}\n\n for i in range(self.value_updates):\n self.sess.run(self.value_train_op, feed_dict)\n\n train_rewards = np.array([path[\"rewards\"].sum() for path in paths])\n train_lengths = np.array([len(path[\"rewards\"]) for path in paths])\n\n thprev = self.get_flat()\n\n def fisher_vector_product(p):\n feed_dict[self.targets[\"flat_tangent\"]] = p\n return self.sess.run(self.fisher_vector_product, feed_dict) + 0.1 * p\n\n g = self.sess.run(self.policy_grad, feed_dict)\n stepdir = hlp.conjugate_gradient(fisher_vector_product, -g)\n\n shs = .5 * stepdir.dot(fisher_vector_product(stepdir))\n lm = np.sqrt(shs / self.max_kl)\n fullstep = stepdir / (lm + 1e-18)\n\n def loss_kl(th):\n self.set_from_flat(th)\n return self.sess.run([self.loss, self.KL], feed_dict=feed_dict)\n\n theta = hlp.linesearch(loss_kl, thprev, fullstep, self.max_kl)\n self.set_from_flat(theta)\n\n lossafter, kloldnew = self.sess.run([self.loss, self.KL], feed_dict=feed_dict)\n\n print(\"Time for testing!\")\n\n if self.distributed:\n weights = self.get_weights()\n for i, weight in enumerate(weights):\n self.variables_server.set(\"weight_\" + str(i), hlp.dump_object(weight))\n worker_args = \\\n {\n 'config': self.config,\n 'test_mode': True,\n }\n hlp.launch_workers(worker_args, self.n_workers)\n paths = []\n for i in range(self.n_workers):\n paths += hlp.load_object(self.variables_server.get(\"paths_{}\".format(i)))\n else:\n self.test_mode = True\n self.make_rollout()\n paths = self.paths\n\n total_rewards = np.array([path[\"total\"] for path in paths])\n eplens = np.array([len(path[\"rewards\"]) for path in paths])\n\n if self.scale == 'full':\n stds = np.sqrt((self.sumsqrs - np.square(self.sums) / self.sumtime) / (self.sumtime - 1))\n means = 
self.sums / self.sumtime\n self.variables_server.set(\"means\", hlp.dump_object(means))\n self.variables_server.set(\"stds\", hlp.dump_object(stds))\n self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))\n\n print(\"\"\"\n-------------------------------------------------------------\nMean test score: {test_scores}\nMean train score: {train_scores}\nMean test episode length: {test_eplengths}\nMean train episode length: {train_eplengths}\nMax test score: {max_test}\nMax train score: {max_train}\nKL between old and new {kl}\nLoss after update {loss}\nMean of features: {means}\nStd of features: {stds}\nTime for iteration: {tt}\n-------------------------------------------------------------\n \"\"\".format(\n means=means,\n stds=stds,\n test_scores=np.mean(total_rewards),\n test_eplengths=np.mean(eplens),\n train_scores=np.mean(train_rewards),\n train_eplengths=np.mean(train_lengths),\n max_test=np.max(total_rewards),\n max_train=np.max(train_rewards),\n kl=kloldnew,\n loss=lossafter,\n tt=time.time() - start_time\n ))\n self.timestep += 1\n self.train_scores.append(np.mean(train_rewards))\n self.test_scores.append(np.mean(total_rewards))\n if self.timestep % self.save_every == 0:\n self.save(self.config[:-5])", "sub_path": "algos/trpo_continuous.py", "file_name": "trpo_continuous.py", "file_ext": "py", "file_size_in_byte": 17794, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "models.feed_forward.FFContinuous", "line_number": 15, "usage_type": "name"}, {"api_name": "models.feed_forward.FFContinuous.__init__", "line_number": 17, "usage_type": "call"}, {"api_name": "models.feed_forward.FFContinuous", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.set_printoptions", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 63, 
"usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.exp", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 71, "usage_type": "call"}, {"api_name": "helpers.utils.flatgrad", "line_number": 76, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 76, "usage_type": "name"}, {"api_name": "tensorflow.stop_gradient", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.stop_gradient", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.gradients", "line_number": 80, "usage_type": "call"}, {"api_name": "helpers.utils.var_shape", "line_number": 81, "usage_type": "attribute"}, {"api_name": "helpers.utils", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.prod", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 89, "usage_type": "call"}, {"api_name": "helpers.utils.flatgrad", "line_number": 90, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 90, "usage_type": "name"}, {"api_name": "helpers.utils.GetFlat", "line_number": 92, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 92, "usage_type": "name"}, {"api_name": "helpers.utils.SetFromFlat", "line_number": 93, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 93, "usage_type": "name"}, {"api_name": "tensorflow.reduce_mean", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.global_variables", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 128, 
"usage_type": "call"}, {"api_name": "os.walk", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.global_variables", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 148, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 160, "usage_type": "call"}, {"api_name": "helpers.utils.load_object", "line_number": 163, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 163, "usage_type": "name"}, {"api_name": "helpers.utils.load_object", "line_number": 164, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 164, "usage_type": "name"}, {"api_name": "helpers.utils.load_object", "line_number": 169, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 169, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 215, "usage_type": "call"}, {"api_name": "helpers.utils.dump_object", "line_number": 224, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 224, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 230, "usage_type": "call"}, {"api_name": "os.setsid", "line_number": 230, "usage_type": "attribute"}, {"api_name": "redis.Redis", "line_number": 231, "usage_type": "call"}, {"api_name": "helpers.utils.launch_workers", "line_number": 243, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 243, "usage_type": "name"}, {"api_name": "helpers.utils.load_object", "line_number": 246, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 246, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 257, "usage_type": "call"}, {"api_name": "helpers.utils.dump_object", "line_number": 261, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 261, "usage_type": "name"}, {"api_name": "helpers.utils.dump_object", "line_number": 262, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 262, "usage_type": "name"}, {"api_name": "time.time", "line_number": 266, "usage_type": "call"}, {"api_name": "helpers.utils.dump_object", "line_number": 271, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 271, "usage_type": "name"}, {"api_name": "helpers.utils.launch_workers", "line_number": 277, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 277, "usage_type": "name"}, {"api_name": "helpers.utils.load_object", "line_number": 280, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 
280, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 287, "usage_type": "call"}, {"api_name": "helpers.utils.discount", "line_number": 298, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 298, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 300, "usage_type": "call"}, {"api_name": "helpers.utils.discount", "line_number": 302, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 302, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 310, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 328, "usage_type": "call"}, {"api_name": "helpers.utils.conjugate_gradient", "line_number": 337, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 337, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 340, "usage_type": "call"}, {"api_name": "helpers.utils.linesearch", "line_number": 347, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 347, "usage_type": "name"}, {"api_name": "helpers.utils.dump_object", "line_number": 357, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 357, "usage_type": "name"}, {"api_name": "helpers.utils.launch_workers", "line_number": 363, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 363, "usage_type": "name"}, {"api_name": "helpers.utils.load_object", "line_number": 366, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 366, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 376, "usage_type": "call"}, {"api_name": "helpers.utils.dump_object", "line_number": 378, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 378, "usage_type": "name"}, {"api_name": "helpers.utils.dump_object", "line_number": 379, "usage_type": "call"}, {"api_name": "helpers.utils", "line_number": 379, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 403, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 404, "usage_type": "call"}, {"api_name": "time.time", "line_number": 407, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 410, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 411, "usage_type": "call"}]} +{"seq_id": "431575194", 
"text": "\"\"\"\nGiven two arrays arr1 and arr2, the elements of arr2 are distinct, and all elements in arr2 are also\nin arr1.\n\nSort the elements of arr1 such that the relative ordering of items in arr1 are the same as in arr2.\nElements that don't appear in arr2 should be placed at the end of arr1 in ascending order.\n\"\"\"\nclass Solution(object):\n def relativeSortArray(self, arr1, arr2):\n \"\"\"\n :type arr1: List[int]\n :type arr2: List[int]\n :rtype: List[int]\n \"\"\"\n d = {v: i for i, v in enumerate(arr2)}\n return sorted(arr1, key=lambda i: d.get(i, 1000 + i)) # how to explain this\n\n def relativeSortArray2(self, arr1, arr2):\n import collections\n ans, cnt = [], collections.Counter(arr1) # Count each number in arr1\n for i in arr2:\n if cnt[i]: ans.extend([i] * cnt.pop(i)) # Sort the common numbers in both arrays by the order of arr2.\n for i in range(1001):\n if cnt[i]: ans.extend([i] * cnt.pop(i)) # Sort the numbers only in arr1.\n return ans\n\n def relativeSortArray3(self, arr1, arr2):\n # Count sort\n cnt = [0] * 1001\n for n in arr1:\n cnt[n] += 1\n i = 0\n for n in arr2:\n while cnt[n] > 0:\n arr1[i] = n\n i += 1\n cnt[n] -= 1\n for n in range(len(cnt)):\n while cnt[n] > 0:\n arr1[i] = n\n i += 1\n cnt[n] -= 1\n return arr1\n\n\narr1, arr2 = [2,3,1,3,2,4,6,7,9,2,19], [2,1,4,3,9,6]\nprint(Solution().relativeSortArray3(arr1, arr2))\n\n", "sub_path": "1122RelativeSortArr.py", "file_name": "1122RelativeSortArr.py", "file_ext": "py", "file_size_in_byte": 1600, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "collections.Counter", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "529310696", "text": "\nimport numpy as np\nfrom math import sqrt, isnan\nfrom obspy.signal.cross_correlation import xcorr\nfrom scipy.signal.signaltools import fftconvolve\nimport warnings\n\ndef my_centered(arr, newsize):\n # get the center portion of a 1-dimensional array\n n = len(arr)\n i0 = (n - newsize) // 2\n if n%2 == 0:\n i0 += 1\n i1 = i0 + newsize\n return arr[i0:i1]\n\ndef classic_xcorr(trace1, trace2, max_lag_samples):\n \n x_corr = xcorr(trace1.data, trace2.data,\\\n max_lag_samples, True)[2]\n \n return x_corr\n\ndef get_correlation_params(data1,data2):\n\n if len(data1) == 0 or len(data2) == 0:\n return(0,0,0,0,0,0)\n # Get the signal energy; most people normalize by the square root of that\n ren1 = np.correlate(data1,data1,mode='valid')[0]\n ren2 = np.correlate(data2,data2,mode='valid')[0]\n\n # Get the window rms\n \n rms1 = sqrt(ren1 / len(data1))\n \n rms2 = sqrt(ren2 / len(data2)) \n \n \n # A further parameter to 'see' impulsive events: range of standard deviations\n nsmp = int(len(data1)/4)\n\n std1 = [0,0,0,0]\n std2 = [0,0,0,0]\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n for i in range(4):\n \n \n std1[i] = np.std(data1[i*nsmp:(i+1)*nsmp])\n if isnan(std1[i]):\n return(0,0,0,0,0,0)\n std2[i] = np.std(data2[i*nsmp:(i+1)*nsmp])\n if isnan(std1[i]):\n return(0,0,0,0,0,0)\n \n # Add small value not to divide by zero\n tol = np.max(std1) * 1e-6 \n if tol != 0:\n rng1 = max(std1) / (min(std1) + tol)\n rng2 = max(std2) / (min(std2) + tol)\n else:\n rng1 = 0\n rng2 = 0\n\n return(rms1,rms2,ren1,ren2,rng1,rng2)\n\n \ndef cross_covar(data1, data2, max_lag_samples, normalize, params=False):\n \n #ToDo: deal with params\n \n# remove mean and normalize; this should have no effect on the energy-normalized \n#correlation result, but may avoid precision issues if trace values 
are very small\n    #if normalize:\n    #    scale1 = 1./np.max(np.abs(data1))\n    #    scale2 = 1./np.max(np.abs(data2))\n    #    data1*=scale1\n    #    data2*=scale2\n    \n    if len(data1) == 0 or len(data2) == 0:\n        return([],[])\n\n    \n    data1-=np.mean(data1)\n    data2-=np.mean(data2)\n    \n    \n    # Make the data more convenient for C function np.correlate\n\n    data1 = np.ascontiguousarray(data1, np.float32)\n    data2 = np.ascontiguousarray(data2, np.float32)\n    \n    if params:\n        params = get_correlation_params(data1,data2)\n        ren1, ren2 = params[2:4]\n    else:\n        ren1 = np.correlate(data1,data1,mode='valid')[0]\n        ren2 = np.correlate(data2,data2,mode='valid')[0]\n\n    if (ren1 == 0.0 or ren2 == 0.0) and normalize:\n        # parentheses matter: bail out only when a zero-energy window would be normalized\n        return([],[])\n\n\n\n    # scipy.fftconvolve is way faster than np.correlate, and zeropads for non-circular convolution\n    ccv = fftconvolve(data1[::-1],data2,mode='same')\n    \n    if normalize:\n        ccv /= ( sqrt(ren1) * sqrt(ren2) )\n\n    return my_centered(ccv,2*max_lag_samples+1),params\n\n\n\n\n", "sub_path": "ants_2/tools/correlations.py", "file_name": "correlations.py", "file_ext": "py", "file_size_in_byte": 3074, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "obspy.signal.cross_correlation.xcorr", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.correlate", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.correlate", "line_number": 30, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 34, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 36, "usage_type": "call"}, {"api_name": "warnings.catch_warnings", "line_number": 45, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 50, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 53, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.ascontiguousarray", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.correlate", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.correlate", "line_number": 99, "usage_type": "call"}, {"api_name": "scipy.signal.signaltools.fftconvolve", "line_number": 107, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 110, "usage_type": "call"}]}
{"seq_id": "455762601", "text": "from ...common.decorators import *\nfrom ...common.errors import *\nfrom enum import IntEnum\nfrom .data_element import DataElement\nfrom decimal import *\nimport json\nimport dateutil.parser\n\nclass CorrelationQueryValue:\n    def __init__(self, value=None, datatype=None):\n        if type(value) == DataElement:\n            self.dataelement = value\n        else:\n            self.dataelement = DataElement(value=value, datatype=datatype)\n\n    @staticmethod\n    def from_dict(dct):\n        val = None\n        try:\n            de = DataElement.from_dict(dct)\n            val = CorrelationQueryValue(value=de)\n        except Exception as e:\n            raise DeserializationError(CorrelationQueryValue, 'dict', dct) from e\n        return val\n\n    
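# Hedged round-trip sketch (editor's note; the 'ACME' payload is made up and\n    # assumes DataElement serializes losslessly):\n    #\n    #     qv = CorrelationQueryValue(value='ACME')\n    #     same = CorrelationQueryValue.from_dict(qv.to_dict())\n    #\n    def 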
to_dict(self):\n        dct = {}\n        try:\n            dct = self.dataelement.to_dict()\n        except Exception as e:\n            raise SerializationError(CorrelationQueryValue, 'dict') from e\n        return dct\n\n\nclass CorrelationQueryResult(IntEnum):\n    INSTANCE = 1\n    ACTION = 2\n    TOPOLOGY = 4\n\n    @staticmethod\n    def to_string_list(val):\n        lst = []\n        if (val&CorrelationQueryResult.INSTANCE) == CorrelationQueryResult.INSTANCE:\n            lst.append(\"Instance\")\n        if (val&CorrelationQueryResult.ACTION) == CorrelationQueryResult.ACTION:\n            lst.append(\"Action\")\n        if (val&CorrelationQueryResult.TOPOLOGY) == CorrelationQueryResult.TOPOLOGY:\n            lst.append(\"Topology\")\n        return lst\n\n\n    @staticmethod\n    def from_string_list(lst):\n        val = 0\n        for e in lst:\n            if e == \"Instance\":\n                val |= CorrelationQueryResult.INSTANCE\n            elif e == \"Action\":\n                val |= CorrelationQueryResult.ACTION\n            elif e == \"Topology\":\n                val |= CorrelationQueryResult.TOPOLOGY\n        return val\n\n\nclass CorrelationQuery:\n    def __init__(self, elements=None, query_results=CorrelationQueryResult.INSTANCE):\n        self.elements = elements\n        self.query_results = query_results\n\n    @staticmethod\n    def from_dict(dct):\n        retval = None\n        try:\n            if type(dct).__name__ == 'list':\n                retval = []\n                for d in dct:\n                    retval.append(CorrelationQuery.from_dict(d))\n            else:\n                elements = list(map(CorrelationQueryValue.from_dict, dct['data'])) if 'data' in dct else None\n                query_results = CorrelationQueryResult.from_string_list(dct.get('link', []))\n                retval = CorrelationQuery(elements, query_results)\n        except Exception as e:\n            raise DeserializationError(CorrelationQuery, 'dict', dct) from e\n        return retval\n\n    @staticmethod\n    def from_json(json_string):\n        query = None\n        try:\n            data = json.loads(json_string)\n            query = CorrelationQuery.from_dict(data)\n        except Exception as e:\n            raise DeserializationError(CorrelationQuery, 'json', json_string) from e\n        return query\n\n    def to_dict(self):\n        dct = None\n        try:\n            # mirror from_dict: elements under 'data', query results under 'link'\n            dct = {}\n            if self.elements is not None:\n                dct['data'] = list(map(lambda o: o.to_dict(), self.elements))\n            dct['link'] = CorrelationQueryResult.to_string_list(self.query_results)\n        except Exception as e:\n            raise SerializationError(CorrelationQuery, 'dict') from e\n        return dct\n\n    def to_json(self):\n        json_string = \"\"\n        try:\n            dct = self.to_dict()\n            json_string = json.dumps(dct)\n        except Exception as e:\n            raise SerializationError(CorrelationQuery, 'json') from e\n        return json_string\n\n\n", "sub_path": "dyna/dynizer/types/correlation_query.py", "file_name": "correlation_query.py", "file_ext": "py", "file_size_in_byte": 4009, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "data_element.DataElement", "line_number": 11, "usage_type": "name"}, {"api_name": "data_element.DataElement", "line_number": 14, "usage_type": "call"}, {"api_name": "data_element.DataElement.from_dict", "line_number": 20, "usage_type": "call"}, {"api_name": "data_element.DataElement", "line_number": 20, "usage_type": "name"}, {"api_name": "enum.IntEnum", "line_number": 35, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 89, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 117, "usage_type": "call"}]}
{"seq_id": "435097115", "text": "import functools\nimport logging\nimport time\n\n\n
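# Hedged usage sketch (editor's addition; 'add' and its arguments are made up):\n#\n#     @logged('trace')\n#     def add(a, b):\n#         return a + b\n#\n#     add(1, 2)   # emits debug records on the 'trace' logger for the call and return\n\n\ndef 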
logged(log='trace'):\n def wrap(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n logger = logging.getLogger(log)\n logger.debug(\"Calling function '{}' with args={} kwargs={}\"\n .format(function.__name__, args, kwargs))\n try:\n response = function(*args, **kwargs)\n except Exception as error:\n logger.debug(\"Function '{}' raised {} with error '{}'\"\n .format(function.__name__,\n error.__class__.__name__,\n str(error)))\n raise error\n logger.debug(\"Function '{}' returned {}\"\n .format(function.__name__,\n response))\n return response\n return wrapper\n return wrap\n\ndef slow_down(func):\n @functools.wraps(func)\n def wrapper_slow_down(*args, **kwargs):\n time.sleep(1)\n return func(*args, **kwargs)\n return wrapper_slow_down\n\n\ndef debug(func):\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n print(f\"Calling {func.__name__}({signature})\")\n value = func(*args, **kwargs)\n print(f\"{func.__name__!r} returned {value!r}\")\n return value\n return wrapper_debug\n\n\n\ndef timer(func):\n @functools.wraps(func)\n def wrapper_timer(*args, **kwargs):\n start_time = time.perf_counter()\n value = func(*args, **kwargs)\n end_time = time.perf_counter()\n run_time = end_time - start_time\n print(f\"Finished {func.__name__!r} in {run_time:.4f} secs\")\n return value\n return wrapper_timer", "sub_path": "ProjektyStudentow/2018_MP/Decorator.py", "file_name": "Decorator.py", "file_ext": "py", "file_size_in_byte": 1951, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 8, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 29, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 37, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 53, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 55, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "239253718", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport cms.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cms', '0012_auto_20150607_2207'),\n ('staff', '0003_auto_20150825_1126'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='staffmember',\n name='contact',\n field=cms.models.fields.PlaceholderField(related_name='contact', slotname=b'staff_contact', editable=False, to='cms.Placeholder', null=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='staffmember',\n name='bio',\n field=cms.models.fields.PlaceholderField(related_name='bio', slotname=b'staff_bio', editable=False, to='cms.Placeholder', null=True),\n preserve_default=True,\n ),\n ]\n", "sub_path": "staff/migrations/0004_auto_20150901_1033.py", "file_name": "0004_auto_20150901_1033.py", "file_ext": "py", "file_size_in_byte": 896, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, 
"usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 16, "usage_type": "name"}, {"api_name": "cms.models.fields.models.fields.PlaceholderField", "line_number": 19, "usage_type": "call"}, {"api_name": "cms.models.fields.models", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cms.models.fields", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 22, "usage_type": "name"}, {"api_name": "cms.models.fields.models.fields.PlaceholderField", "line_number": 25, "usage_type": "call"}, {"api_name": "cms.models.fields.models", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cms.models.fields", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "279378600", "text": "from PyQt5.QtCore import QObject, QThreadPool\n\nfrom neanno.utils.list import get_set_of_list_and_keep_sequence, not_none\nfrom neanno.utils.threading import ParallelWorker, ParallelWorkerSignals\nfrom neanno.utils.text import extract_annotations_as_list, annotate_text\n\n\nclass PredictionPipeline(QObject):\n \"\"\" Predicts different annotations for a text.\"\"\"\n\n _predictors = {}\n _threadpool = QThreadPool()\n\n def add_predictor(self, predictor):\n self._predictors[predictor.name] = predictor\n\n def remove_predictor(self, name):\n del self._predictors[name]\n\n def has_predictor(self, name):\n return name in self._predictors\n\n def has_predictors(self):\n return len(self._predictors) > 0\n\n def get_predictor(self, name):\n return self._predictors[name]\n\n def get_all_predictors(self):\n return self._predictors.values()\n\n def get_all_prediction_enabled_predictors(self):\n return [\n predictor\n for predictor in self._predictors.values()\n if predictor.is_prediction_enabled\n ]\n\n def invoke_predictors(self, function_name, *args, **kwargs):\n for predictor in self.get_all_predictors():\n if hasattr(predictor, function_name):\n getattr(predictor, function_name)(*args, **kwargs)\n\n def collect_from_predictors(\n self, function_name, make_result_distinct, filter_none_values, *args, **kwargs\n ):\n result = []\n for predictor in self.get_all_predictors():\n if hasattr(predictor, function_name):\n predictor_response = getattr(predictor, function_name)(*args, **kwargs)\n if predictor_response:\n result = result.extend(\n getattr(predictor, function_name)(*args, **kwargs)\n )\n if filter_none_values:\n result = not_none(result)\n if make_result_distinct:\n result = get_set_of_list_and_keep_sequence(result)\n return result\n\n def learn_from_annotated_text(self, annotated_text, language):\n self.invoke_predictors(\"learn_from_annotated_text\", annotated_text, language)\n\n def learn_from_annotated_dataset_async(\n self,\n dataset,\n text_column,\n is_annotated_column,\n language_column,\n categories_column,\n categories_to_train,\n entity_codes_to_train,\n signal_slots=ParallelWorkerSignals.default_slots(),\n ):\n parallel_worker = ParallelWorker(\n self.invoke_predictors,\n signal_slots,\n \"learn_from_annotated_dataset\",\n dataset,\n text_column,\n is_annotated_column,\n language_column,\n categories_column,\n categories_to_train,\n entity_codes_to_train,\n )\n self._threadpool.start(parallel_worker)\n\n def learn_from_annotated_dataset(\n self,\n dataset,\n text_column,\n is_annotated_column,\n language_column,\n categories_column,\n categories_to_train,\n 
entity_codes_to_train,\n signal_slots=ParallelWorkerSignals.default_slots(),\n ):\n # call the async version of this method\n self.learn_from_annotated_dataset_async(\n dataset,\n text_column,\n is_annotated_column,\n language_column,\n categories_column,\n categories_to_train,\n entity_codes_to_train,\n signal_slots,\n )\n # wait for done\n # note: this waits until the entire threadpool is done\n # TODO: check if there is a way to wait only for this worker\n self._threadpool.waitForDone()\n\n def predict_inline_annotations(self, text, language=\"en-US\"):\n if not text:\n return \"\"\n annotations = []\n for predictor in self.get_all_prediction_enabled_predictors():\n annotations_by_predictor = extract_annotations_as_list(\n predictor.predict_inline_annotations(text, language)\n )\n annotations.extend(annotations_by_predictor)\n return annotate_text(text, annotations)\n\n def predict_text_categories(self, text, language=\"en-US\"):\n if not text:\n return \"\"\n result = []\n for predictor in self.get_all_prediction_enabled_predictors():\n new_text_categories = predictor.predict_text_categories(text, language)\n result.extend(new_text_categories)\n result = get_set_of_list_and_keep_sequence(result)\n return result\n\n def get_parent_terms_for_named_entity(self, term, entity_code):\n return \", \".join(\n not_none(\n self.collect_from_predictors(\n \"get_parent_terms_for_named_entity\", True, True, term, entity_code\n )\n )\n )\n\n def mark_key_term_for_removal(self, key_term):\n self.invoke_predictors(\"mark_key_term_for_removal\", key_term)\n\n def reset_key_terms_marked_for_removal(self):\n self.invoke_predictors(\"reset_key_terms_marked_for_removal\")\n\n def mark_named_entity_term_for_removal(self, term, entity_code):\n self.invoke_predictors(\"mark_named_entity_term_for_removal\", term, entity_code)\n\n def reset_named_entity_terms_marked_for_removal(self):\n self.invoke_predictors(\"reset_named_entity_terms_marked_for_removal\")\n", "sub_path": "neanno/prediction/pipeline.py", "file_name": "pipeline.py", "file_ext": "py", "file_size_in_byte": 5435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "PyQt5.QtCore.QObject", "line_number": 8, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QThreadPool", "line_number": 12, "usage_type": "call"}, {"api_name": "neanno.utils.list.not_none", "line_number": 56, "usage_type": "call"}, {"api_name": "neanno.utils.list.get_set_of_list_and_keep_sequence", "line_number": 58, "usage_type": "call"}, {"api_name": "neanno.utils.threading.ParallelWorkerSignals.default_slots", "line_number": 73, "usage_type": "call"}, {"api_name": "neanno.utils.threading.ParallelWorkerSignals", "line_number": 73, "usage_type": "name"}, {"api_name": "neanno.utils.threading.ParallelWorker", "line_number": 75, "usage_type": "call"}, {"api_name": "neanno.utils.threading.ParallelWorkerSignals.default_slots", "line_number": 98, "usage_type": "call"}, {"api_name": "neanno.utils.threading.ParallelWorkerSignals", "line_number": 98, "usage_type": "name"}, {"api_name": "neanno.utils.text.extract_annotations_as_list", "line_number": 121, "usage_type": "call"}, {"api_name": "neanno.utils.text.annotate_text", "line_number": 125, "usage_type": "call"}, {"api_name": "neanno.utils.list.get_set_of_list_and_keep_sequence", "line_number": 134, "usage_type": "call"}, {"api_name": "neanno.utils.list.not_none", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "573664984", "text": "#!/usr/bin/env python\n# -*- 
coding: utf-8 -*-\n#\n# Copyright (c) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Built-in BERT datasets class for multiple framework backends.\"\"\"\n\nimport os\nimport logging\nimport json\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Union\nfrom neural_compressor.utils.utility import LazyImport\nfrom .dataset import dataset_registry, Dataset\ntorch = LazyImport('torch')\ntransformers = LazyImport('transformers')\n\nlogger = logging.getLogger(\"neural_compressor\")\n\n@dataset_registry(dataset_type=\"bert\", framework=\"pytorch\", dataset_format='')\nclass PytorchBertDataset(Dataset):\n \"\"\"PyTorch dataset used for model Bert.\n \n This Dataset is to construct from the Bert TensorDataset and not a full implementation\n from yaml config. The original repo link is: https://github.com/huggingface/transformers.\n When you want use this Dataset, you should add it before you initialize your DataLoader.\n (TODO) add end to end support for easy config by yaml by adding the method of\n load examples and process method.\n\n Args: dataset (list): list of data.\n task (str): the task of the model, support \"classifier\", \"squad\".\n model_type (str, default='bert'): model type, support 'distilbert', 'bert',\n 'xlnet', 'xlm'.\n transform (transform object, default=None): transform to process input data.\n filter (Filter objects, default=None): filter out examples according\n to specific conditions.\n\n Examples::\n\n dataset = [[\n [101,2043,2001],\n [1,1,1],\n [[0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0]],\n [1,1,1],\n [1,1,1],\n [[0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0]]\n ]]\n dataset = PytorchBertDataset(dataset=dataset, task='classifier', model_type='bert',\n transform=preprocess, filter=filter)\n \"\"\"\n\n def __init__(self, dataset, task, model_type='bert', transform=None, filter=None):\n \"\"\"Initialize the attributes of class.\"\"\"\n self.dataset = dataset\n assert task in (\"classifier\", \"squad\"), \"Bert task support only classifier squad\"\n self.task = task\n self.transform = transform\n self.model_type = model_type\n\n def __len__(self):\n \"\"\"Length of the dataset.\"\"\"\n return len(self.dataset)\n\n def __getitem__(self, index):\n \"\"\"Magic method.\n\n x[i] is roughly equivalent to type(x).__getitem__(x, index)\n \"\"\"\n sample = self.dataset[index]\n if self.transform is not None:\n sample = self.transform(sample)\n if self.task == 'classifier':\n inputs = {\n 'input_ids': sample[0],\n 'attention_mask': sample[1],\n 'labels': sample[3]}\n\n if self.model_type != 'distilbert':\n # XLM, DistilBERT and RoBERTa don't use segment_ids\n if self.model_type in ['bert', 'xlnet']:\n inputs['token_type_ids'] = sample[2]\n sample = (inputs, inputs['labels'])\n\n elif self.task == 'squad':\n inputs = {\n 'input_ids': sample[0],\n 'attention_mask': sample[1], }\n if self.model_type != 'distilbert':\n # XLM, DistilBERT and RoBERTa don't use segment_ids\n inputs['token_type_ids'] = 
sample[2] if self.model_type in [\n 'bert', 'xlnet'] else None\n if self.model_type in ['xlnet', 'xlm']:\n inputs.update({'cls_index': sample[4], 'p_mask': sample[5]})\n example_indices = sample[3]\n sample = (inputs, example_indices)\n return sample\n\n\n@dataset_registry(dataset_type=\"GLUE\", framework=\"onnxrt_qlinearops, \\\n onnxrt_integerops\", dataset_format='')\nclass ONNXRTBertDataset(Dataset):\n \"\"\"ONNXRT dataset used for model Bert.\n\n Args: data_dir (str): The input data dir.\n model_name_or_path (str): Path to pre-trained student model or shortcut name,\n selected in the list:\n max_seq_length (int, default=128): The maximum length after tokenization.\n Sequences longer than this will be truncated,\n sequences shorter will be padded.\n do_lower_case (bool, default=True): Whether to lowercase the input when tokenizing.\n task (str, default=mrpc): The name of the task to fine-tune.\n Choices include mrpc, qqp, qnli, rte,\n sts-b, cola, mnli, wnli.\n model_type (str, default='bert'): model type, support 'distilbert', 'bert',\n 'mobilebert', 'roberta'.\n dynamic_length (bool, default=False): Whether to use fixed sequence length.\n evaluate (bool, default=True): Whether do evaluation or training.\n transform (transform object, default=None): transform to process input data.\n filter (Filter objects, default=None): filter out examples according\n to specific conditions.\n\n Examples::\n\n dataset = ONNXRTBertDataset(data_dir=data_dir, model_name_or_path='bert-base-uncase',\n transform=preprocess, filter=filter)\n \"\"\"\n def __init__(self, data_dir, model_name_or_path, max_seq_length=128,\\\n do_lower_case=True, task='mrpc', model_type='bert', dynamic_length=False,\\\n evaluate=True, transform=None, filter=None):\n \"\"\"Initialize the attributes of class.\"\"\"\n task = task.lower()\n model_type = model_type.lower()\n assert task in ['mrpc', 'qqp', 'qnli', 'rte', 'sts-b', 'cola', \\\n 'mnli', 'wnli'], 'Unsupported task type'\n assert model_type in ['distilbert', 'bert', 'mobilebert', 'roberta'], 'Unsupported \\\n model type'\n\n self.dynamic_length = dynamic_length\n self.model_type = model_type\n self.max_seq_length = max_seq_length\n tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path,\n do_lower_case=do_lower_case)\n self.dataset = load_and_cache_examples(data_dir, model_name_or_path, \\\n max_seq_length, task, model_type, tokenizer, evaluate)\n\n def __len__(self):\n \"\"\"Length of the dataset.\"\"\"\n return len(self.dataset)\n\n def __getitem__(self, index):\n \"\"\"Magic method.\n\n x[i] is roughly equivalent to type(x).__getitem__(x, index)\n \"\"\"\n return self.dataset[index]\n\n\ndef load_and_cache_examples(data_dir, model_name_or_path, max_seq_length, task, \\\n model_type, tokenizer, evaluate):\n \"\"\"Load and cache the examples.\n\n Helper Function for ONNXRTBertDataset.\n \"\"\"\n from torch.utils.data import TensorDataset\n\n processor = transformers.glue_processors[task]()\n output_mode = transformers.glue_output_modes[task]\n # Load data features from cache or dataset file\n if not os.path.exists(\"./dataset_cached\"):\n os.makedirs(\"./dataset_cached\")\n cached_features_file = os.path.join(\"./dataset_cached\", 'cached_{}_{}_{}_{}'.format(\n 'dev' if evaluate else 'train',\n list(filter(None, model_name_or_path.split('/'))).pop(),\n str(max_seq_length),\n str(task)))\n if os.path.exists(cached_features_file):\n logger.info(\"Load features from cached file {}.\".format(cached_features_file))\n features = 
torch.load(cached_features_file)\n else:\n logger.info(\"Create features from dataset file at {}.\".format(data_dir))\n label_list = processor.get_labels()\n if task in ['mnli', 'mnli-mm'] and model_type in ['roberta']:\n # HACK(label indices are swapped in RoBERTa pretrained model)\n label_list[1], label_list[2] = label_list[2], label_list[1]\n examples = processor.get_dev_examples(data_dir) if evaluate else \\\n processor.get_train_examples(data_dir)\n features = convert_examples_to_features(examples,\n tokenizer,\n task=task,\n label_list=label_list,\n max_length=max_seq_length,\n output_mode=output_mode,\n )\n logger.info(\"Save features into cached file {}.\".format(cached_features_file))\n torch.save(features, cached_features_file)\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n all_seq_lengths = torch.tensor([f.seq_length for f in features], dtype=torch.long)\n if output_mode == \"classification\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, \\\n all_seq_lengths, all_labels)\n return dataset\n\n\ndef convert_examples_to_features(\n examples,\n tokenizer,\n max_length=128,\n task=None,\n label_list=None,\n output_mode=\"classification\",\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n \"\"\"Convert examples to features.\n\n Helper function for load_and_cache_examples.\n \"\"\"\n processor = transformers.glue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Use label list {} for task {}.\".format(label_list, task))\n label_map = {label: i for i, label in enumerate(label_list)}\n features = []\n for (ex_index, example) in enumerate(examples):\n inputs = tokenizer.encode_plus(\n example.text_a,\n example.text_b,\n add_special_tokens=True,\n max_length=max_length,\n return_token_type_ids=True,\n truncation=True,\n )\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n seq_length = len(input_ids)\n padding_length = max_length - len(input_ids)\n\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + \\\n ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \\\n \"Error with input_ids length {} vs {}\".format(\n len(input_ids), max_length)\n assert len(attention_mask) == max_length, \\\n \"Error with attention_mask length {} vs {}\".format(\n len(attention_mask), max_length\n )\n assert len(token_type_ids) == max_length, \\\n \"Error with token_type_ids length {} vs {}\".format(\n len(token_type_ids), max_length\n )\n if output_mode == \"classification\":\n label = label_map[example.label]\n elif output_mode == \"regression\":\n label = float(example.label)\n else:\n raise KeyError(output_mode)\n\n feats = InputFeatures(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n label=label,\n seq_length=seq_length,\n )\n features.append(feats)\n return features\n\n\n@dataclass(frozen=True)\nclass InputFeatures:\n \"\"\"Single set of features of data.\n\n Property names are the same names as the corresponding inputs to a model.\n\n Args:\n input_ids: Indices of input sequence tokens in the vocabulary.\n attention_mask: Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``: Usually ``1`` for tokens that are NOT MASKED,\n ``0`` for MASKED (padded) tokens.\n token_type_ids: (Optional) Segment token indices to indicate first and second\n portions of the inputs. Only some models use them.\n label: (Optional) Label corresponding to the input. 
Int for classification problems,\n float for regression problems.\n seq_length: (Optional) The length of input sequence before padding.\n \"\"\"\n\n input_ids: List[int]\n attention_mask: Optional[List[int]] = None\n token_type_ids: Optional[List[int]] = None\n label: Optional[Union[int, float]] = None\n seq_length: Optional[List[int]] = None\n\n def to_json_string(self):\n \"\"\"Serialize this instance to a JSON string.\"\"\"\n return json.dumps(dataclasses.asdict(self)) + \"\\n\"\n\n\n@dataset_registry(dataset_type=\"bert\", framework=\"tensorflow, tensorflow_itex\", dataset_format='')\nclass TensorflowBertDataset(Dataset):\n \"\"\"Tensorflow dataset used for model Bert.\n\n This dataset supports tfrecord data, please refer to Guide to create tfrecord file first.\n\n Args: root (str): path of dataset.\n label_file (str): path of label file.\n task (str, default='squad'): task type of model.\n model_type (str, default='bert'): model type, support 'bert'.\n transform (transform object, default=None): transform to process input data.\n filter (Filter objects, default=None): filter out examples according\n to specific conditions\n \"\"\"\n\n def __init__(self, root, label_file, task='squad',\n model_type='bert', transform=None, filter=None):\n \"\"\"Initialize the attributes of class.\"\"\"\n import json\n with open(label_file) as lf:\n label_json = json.load(lf)\n assert label_json['version'] == '1.1', 'only support squad 1.1'\n self.label = label_json['data']\n self.root = root\n self.transform = transform\n self.filter = filter\n\n def __getitem__(self, index):\n \"\"\"Magic method.\n\n x[i] is roughly equivalent to type(x).__getitem__(x, index).\n \"\"\"\n return self.root, self.label\n\n def __len__(self):\n \"\"\"Length of the dataset.\"\"\"\n return 1\n\n\nclass ParseDecodeBert():\n \"\"\"Helper function for TensorflowModelZooBertDataset.\n\n Parse the features from sample.\n \"\"\"\n\n def __call__(self, sample):\n \"\"\"Parse the sample data.\n\n Args:\n sample: Data to be parsed.\n \"\"\"\n import tensorflow as tf\n # Dense features in Example proto.\n feature_map = {\n 'input_ids':\n tf.compat.v1.VarLenFeature(dtype=tf.int64),\n 'input_mask':\n tf.compat.v1.VarLenFeature(dtype=tf.int64),\n 'segment_ids':\n tf.compat.v1.VarLenFeature(dtype=tf.int64),\n }\n\n features = tf.io.parse_single_example(sample, feature_map)\n\n input_ids = features['input_ids'].values\n input_mask = features['input_mask'].values\n segment_ids = features['segment_ids'].values\n\n return (input_ids, input_mask, segment_ids)\n\n@dataset_registry(dataset_type=\"mzbert\", framework=\"tensorflow, tensorflow_itex\", dataset_format='')\nclass TensorflowModelZooBertDataset(Dataset):\n \"\"\"Tensorflow dataset for three-input Bert in tf record format.\n\n Root is a full path to tfrecord file, which contains the file name.\n Please use Resize transform when batch_size > 1\n Args: root (str): path of dataset.\n label_file (str): path of label file.\n task (str, default='squad'): task type of model.\n model_type (str, default='bert'): model type, support 'bert'.\n transform (transform object, default=None): transform to process input data.\n filter (Filter objects, default=None): filter out examples according.\n \"\"\"\n\n def __init__(self, root, label_file, task='squad',\n model_type='bert', transform=None, filter=None, num_cores=28):\n \"\"\"Initialize the attributes of class.\"\"\"\n import json\n with open(label_file) as lf:\n label_json = json.load(lf)\n assert label_json['version'] == '1.1', 'only support squad 
1.1'\n self.label = label_json['data']\n import tensorflow as tf\n record_iterator = tf.compat.v1.python_io.tf_record_iterator(root)\n example = tf.train.SequenceExample()\n for element in record_iterator:\n example.ParseFromString(element)\n break\n feature = example.context.feature\n if len(feature['input_ids'].int64_list.value) == 0 \\\n and len(feature['input_mask'].int64_list.value) == 0:\n raise ValueError(\"Tfrecord format is incorrect, please refer\\\n 'https://github.com/tensorflow/models/blob/master/research/\\\n object_detection/dataset_tools/' to create correct tfrecord\")\n # pylint: disable=no-name-in-module\n from tensorflow.python.data.experimental import parallel_interleave\n tfrecord_paths = [root]\n ds = tf.data.TFRecordDataset.list_files(tfrecord_paths)\n ds = ds.apply(\n parallel_interleave(tf.data.TFRecordDataset,\n cycle_length=num_cores,\n block_length=5,\n sloppy=True,\n buffer_output_elements=10000,\n prefetch_input_elements=10000))\n if transform is not None:\n transform.transform_list.insert(0, ParseDecodeBert())\n else:\n transform = ParseDecodeBert()\n ds = ds.map(transform, num_parallel_calls=None)\n if filter is not None:\n ds = ds.filter(filter)\n ds = ds.prefetch(buffer_size=1000)\n from ..dataloaders.tensorflow_dataloader import TFDataDataLoader\n ds = TFDataDataLoader(ds)\n self.root = []\n for inputs in ds:\n self.root.append(inputs)\n self.transform = transform\n self.filter = filter\n\n def __getitem__(self, index):\n \"\"\"Magic method.\n\n x[i] is roughly equivalent to type(x).__getitem__(x, index)\n \"\"\"\n return self.root[index], self.label\n\n def __len__(self):\n \"\"\"Length of the dataset.\"\"\"\n return len(self.root)\n", "sub_path": "neural_compressor/experimental/data/datasets/bert_dataset.py", "file_name": "bert_dataset.py", "file_ext": "py", "file_size_in_byte": 19430, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "neural_compressor.utils.utility.LazyImport", "line_number": 28, "usage_type": "call"}, {"api_name": "neural_compressor.utils.utility.LazyImport", "line_number": 29, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 31, "usage_type": "call"}, {"api_name": "dataset.Dataset", "line_number": 34, "usage_type": "name"}, {"api_name": "dataset.dataset_registry", "line_number": 33, "usage_type": "call"}, {"api_name": "dataset.Dataset", "line_number": 118, "usage_type": "name"}, {"api_name": "dataset.dataset_registry", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": "torch.utils.data.load", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 195, "usage_type": "name"}, {"api_name": "torch.utils.data.save", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 212, "usage_type": "name"}, {"api_name": "torch.utils.data.tensor", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 214, "usage_type": "name"}, {"api_name": 
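# --- illustrative sketch, not part of the dataset record above ---
# TensorflowModelZooBertDataset above fans TFRecord reads out with the old
# tf.data.experimental parallel_interleave. A sketch of the equivalent modern
# Dataset.interleave call, assuming TensorFlow 2.x; the file names are made up
# and nothing is iterated here, so no files need to exist.
import tensorflow as tf

files = tf.data.Dataset.from_tensor_slices(['a.tfrecord', 'b.tfrecord'])
ds = files.interleave(tf.data.TFRecordDataset,
                      cycle_length=4,          # how many files are read at once
                      block_length=5,          # records taken per file per cycle
                      num_parallel_calls=tf.data.AUTOTUNE)
# Iterating ds would require the tfrecord files to actually exist.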
"torch.utils.data.long", "line_number": 214, "usage_type": "attribute"}, {"api_name": "torch.utils.data.tensor", "line_number": 215, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 215, "usage_type": "name"}, {"api_name": "torch.utils.data.long", "line_number": 215, "usage_type": "attribute"}, {"api_name": "torch.utils.data.tensor", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 216, "usage_type": "name"}, {"api_name": "torch.utils.data.long", "line_number": 216, "usage_type": "attribute"}, {"api_name": "torch.utils.data.tensor", "line_number": 217, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 217, "usage_type": "name"}, {"api_name": "torch.utils.data.long", "line_number": 217, "usage_type": "attribute"}, {"api_name": "torch.utils.data.tensor", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 219, "usage_type": "name"}, {"api_name": "torch.utils.data.long", "line_number": 219, "usage_type": "attribute"}, {"api_name": "torch.utils.data.tensor", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 221, "usage_type": "name"}, {"api_name": "torch.utils.data.float", "line_number": 221, "usage_type": "attribute"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 222, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 318, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 319, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 319, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 320, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 320, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 321, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 321, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 322, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 322, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 326, "usage_type": "call"}, {"api_name": "dataclasses.asdict", "line_number": 326, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 300, "usage_type": "call"}, {"api_name": "dataset.Dataset", "line_number": 330, "usage_type": "name"}, {"api_name": "json.load", "line_number": 349, "usage_type": "call"}, {"api_name": "dataset.dataset_registry", "line_number": 329, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.VarLenFeature", "line_number": 384, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 384, "usage_type": "attribute"}, {"api_name": "tensorflow.int64", "line_number": 384, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.VarLenFeature", "line_number": 386, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 386, "usage_type": "attribute"}, {"api_name": "tensorflow.int64", "line_number": 386, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.VarLenFeature", "line_number": 388, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 388, "usage_type": "attribute"}, {"api_name": "tensorflow.int64", "line_number": 388, "usage_type": "attribute"}, {"api_name": "tensorflow.io.parse_single_example", "line_number": 391, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 391, "usage_type": "attribute"}, {"api_name": "dataset.Dataset", "line_number": 400, "usage_type": "name"}, {"api_name": "json.load", 
"line_number": 418, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.python_io.tf_record_iterator", "line_number": 422, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 422, "usage_type": "attribute"}, {"api_name": "tensorflow.train.SequenceExample", "line_number": 423, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 423, "usage_type": "attribute"}, {"api_name": "tensorflow.data.TFRecordDataset.list_files", "line_number": 436, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 436, "usage_type": "attribute"}, {"api_name": "tensorflow.python.data.experimental.parallel_interleave", "line_number": 438, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 438, "usage_type": "attribute"}, {"api_name": "dataloaders.tensorflow_dataloader.TFDataDataLoader", "line_number": 453, "usage_type": "call"}, {"api_name": "dataset.dataset_registry", "line_number": 399, "usage_type": "call"}]} +{"seq_id": "300070153", "text": "import pymysql.cursors\n\nDB_HOST = \"127.0.0.1\"\nDB_USER = \"root\"\n\ncnx = pymysql.connect(host=DB_HOST, user=DB_USER)\n\nclass Database:\n\n\t@staticmethod\n\tdef one(sql):\n\t\tcursor = Database.query(sql)\n\t\tif cursor.rowcount > 0:\n\t\t\tone = cursor.fetchone()\n\t\t\tcursor.close()\n\t\t\treturn one\n\t\telse:\n\t\t\treturn None\n\n\t@staticmethod\n\tdef all(sql):\n\t\tcursor = Database.query(sql)\n\t\tif cursor.rowcount > 0:\n\t\t\tmany = cursor.fetchall()\n\t\t\tcursor.close()\n\t\t\treturn many\n\t\telse:\n\t\t\treturn []\n\n\tdef insert(sql):\n\t\tDatabase.query(sql)\n\n\t@staticmethod\n\tdef query(query):\n\t\ttry:\n\t\t\twith cnx.cursor() as cursor:\n\t\t\t\tcursor.execute(query)\n\t\t\t\tcnx.commit()\n\t\t\t\treturn cursor\n\t\texcept Exception as inst:\n\t\t\traise inst\n", "sub_path": "database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 689, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pymysql.cursors.connect", "line_number": 6, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "132566029", "text": "#! 
/usr/bin/env python3\n\nimport requests\nimport json\nfrom colorama import Fore, Style\n\n\nAPI_URL = 'https://googledictionaryapi.eu-gb.mybluemix.net/'\nLANGUAGE_CODE = 'en'\n\n\nclass ConnectionError(Exception):\n    pass\n\n\nclass word():\n    def __init__(self, word_str):\n        self.word_str = word_str\n\n        request_url = API_URL + '?define=' + word_str + '&lang=' + LANGUAGE_CODE\n        response = requests.get(request_url)\n\n        if response.status_code == 500:\n            raise ConnectionError('Cannot connect to the server')\n        if response.status_code == 404:\n            raise ValueError('Word not found')\n\n        word_data = json.loads(response.content.decode('utf-8'))[0]\n        self.phonetic = word_data['phonetic']\n        self.meaning = word_data['meaning']\n\n    @staticmethod\n    def print_data_with_indentation(data, level=0):\n        if type(data) == str:\n            print('  ' * level, data)\n            return\n\n        if type(data) == dict:\n            for key, value in data.items():\n                print('  ' * level, '-', Fore.GREEN, key.strip(),\n                      Style.RESET_ALL, ':', end='')\n                if type(value) == str:\n                    print(' ', value)\n                else:\n                    print()\n                    word.print_data_with_indentation(value, level + 1)\n        else:\n            for key in data:\n                word.print_data_with_indentation(key, level)\n\n    def print_phonetic(self):\n        print('Phonetic: {}'.format(self.phonetic))\n\n    def print_meaning(self):\n        word.print_data_with_indentation(self.meaning, level=0)\n\n\nif __name__ == '__main__':\n    w = word('hello')\n    w.print_meaning()\n", "sub_path": "gdict/word.py", "file_name": "word.py", "file_ext": "py", "file_size_in_byte": 1681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "colorama.Fore.GREEN", "line_number": 40, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 40, "usage_type": "name"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 41, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "105360236", "text": "import os\nimport sys\nimport shutil\nfrom shutil import copyfile\n\nimport argparse\nfrom argparse import Namespace\nimport yaml\nimport json\n\nimport numpy as np\nimport SimpleITK as sitk\n\nfrom data.dicom_loader import DicomLoader\n\n\ndef 
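# --- illustrative sketch, not part of the dataset records above ---
# The word class above builds its query string by hand, which is how the '='
# after '&lang' went missing in the first place. Letting requests encode the
# parameters avoids that class of bug; preparing the request shows the
# resulting URL without any network traffic (the endpoint is the one from the
# record and may no longer be live).
import requests

req = requests.Request('GET', 'https://googledictionaryapi.eu-gb.mybluemix.net/',
                       params={'define': 'hello', 'lang': 'en'})
print(req.prepare().url)
# -> https://googledictionaryapi.eu-gb.mybluemix.net/?define=hello&lang=en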
main(config):\n # ----------------------------------------------------------\n # Load configuration parameters\n # ----------------------------------------------------------\n with open(config, 'r') as config_stream:\n cfg = yaml.safe_load(config_stream)\n\n opt = Namespace(**cfg['options'])\n exe = Namespace(**cfg['exe'])\n cfg_general = Namespace(**cfg['general'])\n cfg_reg2d = cfg['reg2d']\n cfg_reg3d = cfg['reg3d']\n cfg_gpr_model = Namespace(**cfg['gpr_model'])\n cfg_gpr_learn = Namespace(**cfg['gpr_learn'])\n\n # ----------------------------------------------------------\n # Preprocessing\n # ----------------------------------------------------------\n # Parse data files\n opt_data = Namespace(**cfg['general'])\n opt_data.is_navi = False\n opt_data.input_dir = os.path.join(opt_data.root_dir, opt_data.data_dir)\n opt_data.output_dir = os.path.join(opt_data.root_dir, opt_data.data_dir + \"_mod\")\n\n if opt.preprocessing:\n print('PREPROCESSING DATA FILES...')\n # Assert\n if not os.path.exists(opt_data.input_dir):\n sys.exit('Path to data files does not exist.')\n\n if os.path.isdir(opt_data.output_dir):\n if os.path.isdir(os.path.join(opt_data.output_dir, 'sorted')):\n shutil.rmtree(os.path.join(opt_data.output_dir, 'sorted'))\n [os.remove(os.path.join(opt_data.output_dir, f)) for f in os.listdir(opt_data.output_dir)]\n else:\n os.makedirs(opt_data.output_dir, exist_ok=True)\n\n data_loader = DicomLoader(opt_data)\n data_loader.preprocess()\n print('[done]')\n\n # Parse navi if required\n if cfg_general.surrogate_type == 0 or cfg_general.surrogate_type == 2:\n opt_navi = Namespace(**cfg['general'])\n opt_navi.is_navi = True\n opt_navi.input_dir = os.path.join(opt_navi.root_dir, opt_navi.navi_dir)\n opt_navi.output_dir = os.path.join(opt_navi.root_dir, opt_navi.navi_dir + \"_mod\")\n\n if opt.preprocessing:\n print('PREPROCESSING NAVIS...')\n if not os.path.exists(opt_navi.input_dir):\n sys.exit('Path to navigators does not exist.')\n\n if os.path.isdir(opt_navi.output_dir):\n [os.remove(os.path.join(opt_navi.output_dir, f)) for f in os.listdir(opt_navi.output_dir)]\n else:\n os.makedirs(opt_navi.output_dir, exist_ok=True)\n\n navi_loader = DicomLoader(opt_navi)\n navi_loader.preprocess()\n print('[done]')\n\n registration2d_dir = os.path.join(cfg_general.root_dir, 'reg_2d')\n reg2d_dirs = {\n 'registration_dir': registration2d_dir,\n 'warped_dir': os.path.join(registration2d_dir, 'warpedImage'),\n 'dfs_dir': os.path.join(registration2d_dir, 'dfs')\n }\n\n if opt.registration_2d:\n print('2D REGISTRATION OF NAVIS')\n refs = sorted([os.path.join(opt_navi.output_dir, i) for i in os.listdir(opt_navi.output_dir) if i.startswith('navi')])\n target = os.path.join(opt_navi.output_dir, cfg_general.master_navi)\n print('Registration 2d: Number of reference images: ' + str(len(refs)))\n\n pairwise_registration(reg2d_dirs, cfg_reg2d, target, refs, exe.registration_2d)\n print('[done]')\n\n # ----------------------------------------------------------\n # Stacking\n # ----------------------------------------------------------\n # TODO: change order of assignment (if input_dir defined, use this path always)\n stacking_par_list = []\n if cfg_general.surrogate_type == 0:\n stack_dir = os.path.join(cfg_general.root_dir, 'stacks_navi')\n surrogate_dir = os.path.join(registration2d_dir, 'dfs')\n stacking_method = 'vonSiebenthal'\n series_format = 'dfReg%05d.vtk'\n elif cfg_general.surrogate_type == 1:\n stack_dir = os.path.join(cfg_general.root_dir, 'stacks_us')\n surrogate_dir = 
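# --- illustrative sketch, not part of the dataset record above ---
# The pipeline above turns each YAML section into an argparse.Namespace so
# options read as cfg.attr rather than cfg['attr']. Self-contained sketch of
# that pattern; the YAML text here is made up.
import yaml
from argparse import Namespace

cfg = yaml.safe_load("""
options:
  preprocessing: true
  stacking: false
""")
opt = Namespace(**cfg['options'])  # dict keys become attributes
print(opt.preprocessing)           # -> True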
os.path.join(cfg_general.root_dir, cfg_general.us_dir)\n stacking_method = 'ultrasound'\n series_format = '%05d.png'\n elif cfg_general.surrogate_type == 2:\n stack_dir = os.path.join(cfg_general.root_dir, 'stacks_navi')\n surrogate_dir = os.path.join(registration2d_dir, 'dfs')\n stacking_method = 'pusterla'\n series_format = 'dfReg%05d.vtk'\n else:\n try:\n surrogate_dir = os.path.join(cfg_general.root_dir, cfg_general.input_dir)\n except:\n sys.exit('Surrogate not correctly defined')\n\n # Assert\n if not os.path.isdir(surrogate_dir):\n sys.exit('Path to surrogate data does not exist')\n\n if opt.stacking:\n print('STACKING...')\n stacking_par_list.append('-o ' + stack_dir)\n stacking_par_list.append('-data ' + opt_data.output_dir)\n stacking_par_list.append('-surrogate ' + surrogate_dir)\n stacking_par_list.append('-startIndex 0')\n stacking_par_list.append('-endIndex ' + str(cfg_general.n_sweeps * cfg_general.n_slices - 1))\n stacking_par_list.append('-seriesFormat ' + series_format)\n stacking_par_list.append('-numberOfSweeps ' + str(cfg_general.n_sweeps))\n stacking_par_list.append('-numberOfSlicePos ' + str(cfg_general.n_slices))\n stacking_par_list.append('-stackingMethod ' + stacking_method)\n stacking_par_list.append('-save')\n\n stacking_pars = ' '.join(stacking_par_list)\n stacking_cmd = ' '.join([exe.stacking, stacking_pars])\n\n if os.path.isdir(stack_dir):\n [os.remove(os.path.join(stack_dir, f)) for f in os.listdir(stack_dir)]\n else:\n os.makedirs(stack_dir, exist_ok=True)\n\n os.system(stacking_cmd)\n print('[done]')\n\n # ----------------------------------------------------------\n # 3D Registration\n # ----------------------------------------------------------\n # TODO: change order of assignment (if output_dir defined, use this path always)\n if cfg_general.surrogate_type == 0 or cfg_general.surrogate_type == 2:\n registration3d_dir = os.path.join(cfg_general.root_dir, 'reg_3d_navi')\n elif cfg_general.surrogate_type == 1:\n registration3d_dir = os.path.join(cfg_general.root_dir, 'reg_3d_us')\n else:\n try:\n registration3d_dir = os.path.join(cfg_general.root_dir, cfg_general.output_dir)\n except:\n sys.exit('Data directory not correctly defined')\n\n reg3d_dirs = {\n 'registration_dir': registration3d_dir,\n 'warped_dir': os.path.join(registration3d_dir, 'warpedImage'),\n 'dfs_dir': os.path.join(registration3d_dir, 'dfs')\n }\n\n if opt.registration_3d:\n print('3D REGISTRATION...')\n refs = sorted([os.path.join(stack_dir, i) for i in os.listdir(stack_dir) if i.startswith('vol')])\n target = os.path.join(stack_dir, cfg_general.master_volume)\n print('Registration 3d: Number of reference images: ' + str(len(refs)))\n\n pairwise_registration(reg3d_dirs, cfg_reg3d, target, refs, exe.registration_3d)\n print('[done]')\n\n # ----------------------------------------------------------\n # Split data into training and test set\n # ----------------------------------------------------------\n if opt.splitting_data or ((opt.registration_2d or opt.registration_3d) and opt.regression):\n print('SPLITTING...')\n n_imgs = cfg_general.n_sweeps*cfg_general.n_slices\n n_training_imgs = cfg_general.n_training_sweeps*cfg_general.n_slices\n n_test_imgs = n_imgs - n_training_imgs\n\n # Create directories\n sub_dir = {'surrogate': surrogate_dir,\n 'dfs': reg3d_dirs['dfs_dir'],\n 'warped': reg3d_dirs['warped_dir']}\n\n for name, current_dir in sub_dir.items():\n if current_dir == surrogate_dir:\n format = cfg_general.input_format\n else:\n format = cfg_general.output_format\n\n files = 
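# --- illustrative sketch, not part of the dataset record above ---
# The stacking step above joins its flags into one string and hands it to
# os.system. Building the argument vector as a list instead keeps paths with
# spaces intact and lets the command be inspected before running; the
# executable name and paths here are hypothetical.
import subprocess

cmd = ['StackingTool',                 # hypothetical executable
       '-o', '/tmp/stacks',
       '-seriesFormat', 'dfReg%05d.vtk',
       '-numberOfSweeps', '10',
       '-save']
print(' '.join(cmd))
# subprocess.run(cmd, check=True) would execute it once such a tool exists.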
sorted([os.path.join(current_dir, i) for i in os.listdir(current_dir) if i.endswith(format)])\n train_dir = os.path.join(current_dir, 'train')\n test_dir = os.path.join(current_dir, 'test')\n\n # Create or empty folder\n # train\n if os.path.isdir(train_dir):\n [os.remove(os.path.join(train_dir, f)) for f in os.listdir(train_dir)]\n else:\n os.makedirs(train_dir, exist_ok=True)\n\n # test\n if os.path.isdir(test_dir):\n [os.remove(os.path.join(test_dir, f)) for f in os.listdir(test_dir)]\n else:\n os.makedirs(test_dir, exist_ok=True)\n\n # copy all training files to train_dir\n for itr, file in enumerate(files[:n_training_imgs]):\n dest = os.path.join(train_dir, ('%05d.' % itr) + format)\n copyfile(file, dest)\n\n # copy all test files to train_dir\n for itr, file in enumerate(files[n_training_imgs:]):\n dest = os.path.join(test_dir, ('%05d.' % itr) + format)\n copyfile(file, dest)\n\n print('Splitting: Number of training images in' + train_dir +': ' + str(len(os.listdir(train_dir))))\n print('Splitting: Number of training images in' + test_dir +': ' + str(len(os.listdir(test_dir))))\n print('[done]')\n\n # ----------------------------------------------------------\n # GP Regression\n # ----------------------------------------------------------\n # Config files\n cfg_model = os.path.join(cfg_general.root_dir, 'config_model.json')\n with open(cfg_model, 'w') as fp:\n json.dump(cfg['gpr_model'], fp)\n\n cfg_learn = os.path.join(cfg_general.root_dir, 'config_learn.json')\n with open(cfg_learn, 'w') as fp:\n json.dump(cfg['gpr_learn'], fp)\n\n cfg_predict = os.path.join(cfg_general.root_dir, 'config_predict.json')\n with open(cfg_predict, 'w') as fp:\n json.dump(cfg['gpr_predict'], fp)\n\n # Folder structure\n subdir = cfg_gpr_model.subdir # validation, test\n gpr_dir = os.path.join(registration3d_dir, 'gpr')\n gpr_prefix = os.path.join(gpr_dir, 'gpr')\n gpr_result_dir = os.path.join(registration3d_dir, '{:s}_pred'.format(subdir))\n gpr_ar_dir = os.path.join(cfg_general.root_dir, cfg_general.ar_dir)\n\n # Perform regression\n if opt.regression:\n print('GP REGRESSION...')\n if os.path.isdir(gpr_dir):\n if not cfg_gpr_learn.use_precomputed:\n [os.remove(os.path.join(gpr_dir, f)) for f in os.listdir(gpr_dir)]\n else:\n # os.system('sudo mkdir {:s}'.format(gpr_dir))\n os.makedirs(gpr_dir, exist_ok=True)\n\n if os.path.isdir(gpr_result_dir):\n [os.remove(os.path.join(gpr_result_dir, f)) for f in os.listdir(gpr_result_dir)]\n else:\n os.makedirs(gpr_result_dir, exist_ok=True)\n # os.system('sudo mkdir {:s}'.format(gpr_result_dir))\n\n # Learn\n gpr_learn_par_list = []\n gpr_learn_par_list.append(cfg_model)\n gpr_learn_par_list.append(cfg_learn)\n gpr_learn_par_list.append(gpr_prefix)\n gpr_learn_par_list.append(os.path.join(surrogate_dir, 'train'))\n gpr_learn_par_list.append(os.path.join(registration3d_dir, 'train'))\n gpr_learn_par_list.append(gpr_ar_dir)\n\n gpr_learn_pars = ' '.join(gpr_learn_par_list)\n gpr_learn_cmd = ' '.join([exe.regression_learn, gpr_learn_pars])\n os.system(gpr_learn_cmd)\n\n # Predict\n gpr_predict_par_list = []\n gpr_predict_par_list.append(cfg_model)\n gpr_predict_par_list.append(cfg_predict)\n gpr_predict_par_list.append(gpr_prefix)\n gpr_predict_par_list.append(os.path.join(surrogate_dir, subdir))\n gpr_predict_par_list.append(os.path.join(registration3d_dir, subdir))\n gpr_predict_par_list.append(gpr_result_dir)\n gpr_predict_par_list.append(os.path.join(cfg_general.root_dir, cfg_general.master_volume))\n\n gp_predict_pars = ' '.join(gpr_predict_par_list)\n 
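# --- illustrative sketch, not part of the dataset record above ---
# The splitting block above copies the first n_training_imgs files into
# train/ and the rest into test/, renumbering both groups from zero. The same
# logic, compacted into a reusable function; directory paths are supplied by
# the caller and are hypothetical here.
import os
import shutil

def split_files(files, n_train, train_dir, test_dir, fmt='vtk'):
    os.makedirs(train_dir, exist_ok=True)
    os.makedirs(test_dir, exist_ok=True)
    for i, f in enumerate(files[:n_train]):        # training portion, renumbered
        shutil.copyfile(f, os.path.join(train_dir, '%05d.%s' % (i, fmt)))
    for i, f in enumerate(files[n_train:]):        # held-out portion, renumbered
        shutil.copyfile(f, os.path.join(test_dir, '%05d.%s' % (i, fmt)))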
gp_predict_cmd = ' '.join([exe.regression_predict, gp_predict_pars])\n os.system(gp_predict_cmd)\n print('[done]')\n\n # ----------------------------------------------------------\n # Evaluation\n # ----------------------------------------------------------\n diff_dir = os.path.join(registration3d_dir, '{:s}_diff'.format(subdir))\n if opt.evaluation:\n print('EVALUATION...')\n if os.path.isdir(diff_dir):\n [os.remove(os.path.join(diff_dir, f)) for f in os.listdir(diff_dir)]\n else:\n os.makedirs(diff_dir, exist_ok=True)\n\n # Compute difference between ground-truth and gpr prediction\n if cfg_general.eval_warped:\n dfs_test_dir = os.path.join(reg3d_dirs['dfs_dir'], subdir)\n warped_test_dir = os.path.join(reg3d_dirs['warped_dir'], subdir)\n\n warped_true = sorted([os.path.join(warped_test_dir, i) for i in os.listdir(warped_test_dir)\n if i.endswith(cfg_general.output_format)])\n warped_pred = sorted([os.path.join(gpr_result_dir, i) for i in os.listdir(gpr_result_dir) if i.startswith('warpedImg')])\n\n stacks_true = sorted([os.path.join(stack_dir, i) for i in os.listdir(stack_dir)\n if i.startswith('vol')])\n else:\n dfs_test_dir = os.path.join(registration3d_dir, subdir)\n\n dfs_true = sorted([os.path.join(dfs_test_dir, i) for i in os.listdir(dfs_test_dir)\n if i.endswith(cfg_general.output_format)])\n dfs_pred = sorted([os.path.join(gpr_result_dir, i) for i in os.listdir(gpr_result_dir)\n if i.startswith('dfPred')])\n\n for itr in range(0, len(dfs_true)):\n # read images\n if cfg_general.eval_warped:\n sitk_imgs = {\n 'stack_true': sitk.ReadImage(stacks_true[itr]),\n 'warped_true': sitk.ReadImage(warped_true[itr]),\n 'warped_pred': sitk.ReadImage(warped_pred[itr]),\n 'df_true': sitk.ReadImage(dfs_true[itr]),\n 'df_pred': sitk.ReadImage(dfs_pred[itr])\n }\n else:\n sitk_imgs = {\n 'df_true': sitk.ReadImage(dfs_true[itr]),\n 'df_pred': sitk.ReadImage(dfs_pred[itr])\n }\n\n # Convert sitk to np\n np_imgs = {}\n for name, img in sitk_imgs.items():\n np_imgs[name] = sitk.GetArrayFromImage(img)\n\n # Qualitative comparison\n if cfg_general.eval_warped:\n np_diff = {\n 'stack': np.absolute(np_imgs['stack_true'] - np_imgs['warped_pred']),\n 'warped': np.absolute(np_imgs['warped_true']*4095 - np_imgs['warped_pred']),\n 'df': np_imgs['df_true'] - np_imgs['df_pred']\n }\n else:\n np_diff = {\n 'df': np_imgs['df_true'] - np_imgs['df_pred']\n }\n\n sitk_diff = {}\n for name, img in np_diff.items():\n diff = sitk.GetImageFromArray(img)\n diff.SetDirection(sitk_imgs[name + '_true'].GetDirection())\n diff.SetSpacing(sitk_imgs[name + '_true'].GetSpacing())\n diff.SetOrigin(sitk_imgs[name + '_true'].GetOrigin())\n\n sitk_diff[name] = diff\n sitk.WriteImage(diff, os.path.join(diff_dir, ('diff_' + name + '%05d.vtk' % itr)))\n\n print('[done]')\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', help='path to config.yaml file', type=str, default='./params/config.yaml')\n args = parser.parse_args()\n\n main(args.config)\n", "sub_path": "scripts/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 17353, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.isdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 27, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 30, 
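# --- illustrative sketch, not part of the dataset record above ---
# The evaluation above differences two fields and copies the reference image's
# direction, spacing, and origin onto the result before writing it. SimpleITK
# offers CopyInformation as a one-call alternative to the three Set* calls;
# the arrays and output path here are made up.
import numpy as np
import SimpleITK as sitk

ref = sitk.GetImageFromArray(np.zeros((4, 4, 4), dtype=np.float32))
pred = sitk.GetImageFromArray(np.ones((4, 4, 4), dtype=np.float32))

diff = sitk.GetImageFromArray(sitk.GetArrayFromImage(ref) - sitk.GetArrayFromImage(pred))
diff.CopyInformation(ref)  # spacing, origin, and direction in one call
sitk.WriteImage(diff, '/tmp/diff.vtk')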
"usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 31, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 32, "usage_type": "call"}, {"api_name": "os.system", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 50, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 58, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 60, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 61, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 62, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 65, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 66, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 86, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 88, "usage_type": "call"}, {"api_name": "data.dicom_loader.DicomLoader", "line_number": 90, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 107, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 109, "usage_type": "call"}, {"api_name": "data.dicom_loader.DicomLoader", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 178, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 180, "usage_type": "call"}, {"api_name": 
"os.system", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 192, "usage_type": "call"}, {"api_name": "os.path", "line_number": 192, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path", "line_number": 201, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path", "line_number": 202, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path", "line_number": 207, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path", "line_number": 208, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path", "line_number": 234, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path", "line_number": 235, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path", "line_number": 236, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path", "line_number": 241, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 241, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 243, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path", "line_number": 247, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 247, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 249, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 253, "usage_type": "call"}, {"api_name": "os.path", "line_number": 253, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path", "line_number": 258, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 259, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 261, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path", "line_number": 269, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 271, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 273, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 273, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 275, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path", "line_number": 277, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 283, "usage_type": "call"}, {"api_name": "os.path", "line_number": 283, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 284, "usage_type": "call"}, {"api_name": "os.path", "line_number": 284, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 285, "usage_type": "call"}, {"api_name": "os.path", "line_number": 285, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 286, "usage_type": "call"}, {"api_name": "os.path", "line_number": 286, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 291, "usage_type": "call"}, {"api_name": "os.path", "line_number": 291, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 293, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 293, "usage_type": "call"}, {"api_name": "os.path", "line_number": 293, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 293, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 296, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 298, "usage_type": "call"}, {"api_name": "os.path", "line_number": 298, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 299, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 299, "usage_type": "call"}, {"api_name": "os.path", "line_number": 299, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 299, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 301, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 309, "usage_type": "call"}, {"api_name": "os.path", "line_number": 309, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 310, "usage_type": "call"}, {"api_name": "os.path", "line_number": 310, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 315, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 322, "usage_type": "call"}, {"api_name": "os.path", "line_number": 322, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 323, "usage_type": "call"}, {"api_name": "os.path", "line_number": 323, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path", "line_number": 325, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 329, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 335, "usage_type": "call"}, {"api_name": "os.path", "line_number": 335, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path", "line_number": 338, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 339, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 341, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 345, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 345, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 346, "usage_type": "call"}, {"api_name": "os.path", "line_number": 346, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path", "line_number": 348, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 350, "usage_type": "call"}, {"api_name": "os.path", "line_number": 350, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 350, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path", "line_number": 352, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 355, "usage_type": "call"}, {"api_name": "os.path", "line_number": 355, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 357, "usage_type": "call"}, {"api_name": "os.path", "line_number": 357, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 357, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 359, "usage_type": "call"}, {"api_name": "os.path", "line_number": 359, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 359, "usage_type": "call"}, {"api_name": "SimpleITK.ReadImage", "line_number": 366, "usage_type": "call"}, {"api_name": "SimpleITK.ReadImage", "line_number": 367, "usage_type": "call"}, {"api_name": "SimpleITK.ReadImage", "line_number": 368, "usage_type": "call"}, {"api_name": "SimpleITK.ReadImage", "line_number": 369, "usage_type": "call"}, {"api_name": "SimpleITK.ReadImage", "line_number": 370, "usage_type": "call"}, {"api_name": "SimpleITK.ReadImage", "line_number": 374, "usage_type": "call"}, {"api_name": "SimpleITK.ReadImage", "line_number": 375, "usage_type": "call"}, {"api_name": "SimpleITK.GetArrayFromImage", "line_number": 381, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 387, "usage_type": "call"}, {"api_name": "SimpleITK.GetImageFromArray", "line_number": 397, "usage_type": "call"}, {"api_name": "SimpleITK.WriteImage", "line_number": 403, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 403, "usage_type": "call"}, {"api_name": "os.path", "line_number": 403, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 409, "usage_type": "call"}]} +{"seq_id": "579837309", "text": "import numpy as np\nimport glob\nimport threading\nimport time\nimport cv2\n\nread_files = set([])\nsoup_rad = 30 #px\ndt = np.dtype([('w', np.intc),\n ('h', np.intc),\n ('low', np.intc),\n ('high', np.intc),\n ('int_temp', np.intc),\n ('pad', np.intc),\n ('time', 'd'),\n ('img', np.uint16, (160 * 120,))\n ])\n\n\ndef conv_celsius(temps):\n r = 395653\n b = 1428\n f = 1\n o = 156\n t_k = b / np.log(r / (temps - o) + f)\n return t_k - 273.15\n\n\n# Creates a circle mask with some radius and center.\ndef cmask(center, radius, array_like):\n a, b = center\n nx, ny = array_like.shape\n y, x = np.ogrid[-a:nx-a,-b:ny-b]\n mask = x*x + y*y <= radius*radius\n return mask\n\n\nclass ObjectStorer(object):\n def __init__(self, init):\n self.value = init\n self.time = 0\n\n def store(self, init):\n self.value = init\n\n def storet(self, t):\n self.time = t\n\n\n# Grabs thermal image frames from the .dat files.\ndef 
grab_frames(last_time):\n frame_list = np.array([]).reshape(0,19200)\n times_list = np.array([])\n dat_files = set( glob.glob('*.dat') )\n to_read = dat_files - read_files\n for fname in to_read:\n a = np.fromfile(fname, dtype=dt)\n ts = a['time']\n imgs = a['img']\n\n mask = ts>=last_time\n frame_list = np.concatenate((frame_list, imgs[mask]), axis=0)\n times_list = np.concatenate((times_list, ts[mask]))\n\n if ts[mask].shape[0] == 0 and fname != 'thermal.dat':\n read_files.add(fname)\n\n sort_order = np.argsort(times_list, axis=0)\n return frame_list[sort_order], times_list[sort_order]\n\n\ndef get_lapl(img):\n lapl = cv2.Laplacian(img, cv2.CV_64F)\n r, c = img.shape\n cm = cmask((r/2, c/2), soup_rad, img)\n lapl[cm] = np.nan\n # lapl[np.abs(lapl) > 7] = 0\n m = np.nanmean(lapl)\n lapl[cm] = m\n return lapl\n\n\n# Checks the a set of images for splatter.\n# Currently checks the target and previous frame.\n# Takes the Laplacian of both images, and if there is any significant increase greater than 10(?), it marks a splatter.\n# I don't actually know the units, just guessing with this magic number right now.\ndef check_splatter(images):\n assert images.shape[0] == 2\n\n # Strip the first few cols b/c theres some distortions.\n i1 = images[1].reshape(120,160)[5:-5]\n i2 = images[0].reshape(120,160)[5:-5]\n\n l1 = get_lapl(i1)\n l2 = get_lapl(i2)\n # diff = to_check[1] - 0.5 * to_check[0] - 0.5 * to_check[2]\n diff = l2 - l1\n\n score = np.amax(diff) - np.mean(diff)\n\n # if score > 10:\n # print('DEBUG')\n # print(cm.shape)\n # print(images.shape)\n # print(score)\n # print('END DEBUG')\n\n return score > 5\n\n\n# Main thread loop. Grabs the latest few frames, and checks them for splatter.\n# When it finds splatter, call on_splatter().\n# Repeat.\ndef thread_loop(latest_img, on_splatter):\n last_time = 0\n while True:\n frames, times = grab_frames(last_time)\n if len(frames) > 0:\n last = np.rot90(conv_celsius(frames[-1].reshape(120,160)))\n latest_img.store(last)\n latest_img.storet(times[-1])\n if len(frames) >= 3:\n frames = conv_celsius(frames)\n for i in range(1, len(frames) - 1):\n if check_splatter(frames[i - 1:i+1]):\n on_splatter(times[i])\n last_time = times[-2]\n\n\ndef start_thread(on_splatter=None):\n latest_img = ObjectStorer( np.zeros((120*160)) )\n if on_splatter:\n th = threading.Thread(target=thread_loop, args=[latest_img, on_splatter])\n else:\n th = threading.Thread(target=thread_loop, args=[latest_img, lambda x: print('Splatter at t={}'.format(x))])\n th.setDaemon(True)\n th.start()\n return latest_img\n\nif __name__ == '__main__':\n img = start_thread()\n time.sleep(3)\n print('Python time is ', time.time())\n", "sub_path": "microwave/Pi-side/therm_frame_grabber.py", "file_name": "therm_frame_grabber.py", "file_ext": "py", "file_size_in_byte": 3974, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.dtype", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.intc", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.intc", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.intc", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.intc", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.intc", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.intc", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.log", 
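# --- illustrative sketch, not part of the dataset record above ---
# cmask above builds a boolean circle mask with np.ogrid, and get_lapl blanks
# the masked soup region with NaN before scoring the Laplacian. The masking
# trick in isolation, on a tiny made-up image:
import numpy as np

img = np.arange(25.0).reshape(5, 5)
a, b = 2, 2                            # mask center
y, x = np.ogrid[-a:5 - a, -b:5 - b]    # coordinate grids relative to center
mask = x * x + y * y <= 1              # radius-1 disc
img[mask] = np.nan                     # blank the disc
print(np.nanmean(img))                 # statistics ignore the blanked region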
"line_number": 25, "usage_type": "call"}, {"api_name": "numpy.ogrid", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.fromfile", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.Laplacian", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 76, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 131, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 133, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 135, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 142, "usage_type": "call"}, {"api_name": "time.time", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "283078459", "text": "from unittest.mock import MagicMock\n\nimport pytest\nfrom PySide2.QtTest import QTest\n\nfrom node_launcher.constants import (\n BITCOIN_MAINNET_PEER_PORT,\n BITCOIN_MAINNET_RPC_PORT,\n TARGET_BITCOIN_RELEASE\n)\nfrom node_launcher.gui.menu.manage_bitcoind import BitcoindPortsLayout\n\n\n@pytest.fixture\ndef bitcoind_ports_layout() -> BitcoindPortsLayout:\n bitcoin = MagicMock()\n bitcoin.node_port = BITCOIN_MAINNET_PEER_PORT\n bitcoin.rpc_port = BITCOIN_MAINNET_RPC_PORT\n bitcoin.zmq_block_port = 18500\n bitcoin.zmq_tx_port = 18501\n bitcoin.zmq_tx_port = 18501\n bitcoin.software.release_version = TARGET_BITCOIN_RELEASE\n layout = BitcoindPortsLayout(bitcoin)\n return layout\n\n\nclass TestBitcoindConfigurationTab(object):\n def test_bitcoin_network_port(self,\n bitcoind_ports_layout,\n qtbot: QTest):\n assert bitcoind_ports_layout.bitcoin_network_port.text().endswith(\n str(BITCOIN_MAINNET_PEER_PORT)\n )\n\n def test_rpc_port(self,\n bitcoind_ports_layout,\n qtbot: QTest):\n assert bitcoind_ports_layout.rpc_port.text().endswith(\n str(BITCOIN_MAINNET_RPC_PORT)\n )\n\n def test_zmq_ports(self,\n bitcoind_ports_layout,\n qtbot: QTest):\n assert bitcoind_ports_layout.zmq_ports.text().endswith('18500/18501')\n\n", "sub_path": "tests/test_gui/test_menu/test_manage_bitcoind/test_bitcoind_ports_layout.py", "file_name": "test_bitcoind_ports_layout.py", "file_ext": "py", "file_size_in_byte": 1438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "unittest.mock.MagicMock", "line_number": 16, "usage_type": "call"}, {"api_name": "node_launcher.constants.BITCOIN_MAINNET_PEER_PORT", "line_number": 17, "usage_type": "name"}, {"api_name": "node_launcher.constants.BITCOIN_MAINNET_RPC_PORT", "line_number": 18, "usage_type": "name"}, {"api_name": "node_launcher.constants.TARGET_BITCOIN_RELEASE", "line_number": 22, "usage_type": "name"}, {"api_name": "node_launcher.gui.menu.manage_bitcoind.BitcoindPortsLayout", "line_number": 23, "usage_type": 
"call"}, {"api_name": "pytest.fixture", "line_number": 14, "usage_type": "attribute"}, {"api_name": "node_launcher.gui.menu.manage_bitcoind.BitcoindPortsLayout", "line_number": 15, "usage_type": "name"}, {"api_name": "PySide2.QtTest.QTest", "line_number": 30, "usage_type": "name"}, {"api_name": "node_launcher.constants.BITCOIN_MAINNET_PEER_PORT", "line_number": 32, "usage_type": "argument"}, {"api_name": "PySide2.QtTest.QTest", "line_number": 37, "usage_type": "name"}, {"api_name": "node_launcher.constants.BITCOIN_MAINNET_RPC_PORT", "line_number": 39, "usage_type": "argument"}, {"api_name": "PySide2.QtTest.QTest", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "587829680", "text": "# ! this program is too heavy when use pred_per !\nfrom sklearn.externals import joblib\nimport numpy as np\nfrom collections import defaultdict\nimport pickle\nfrom knock72 import pn_list,stem_list\n\ndef sig(x):\n s = 1 / (1 + np.exp(-x))\n return s\n\n#print(len(pn_list))\nlr = joblib.load('lr.pkl')\nweight = lr.coef_\nbias = lr.intercept_\n\nwith open('word_ids.pkl','rb') as ids:\n word_ids = pickle.load(ids)\n\npl_list = []\n\n\nfor i,line in enumerate(stem_list):\n sent_word_id = [[0] * len(word_ids)]\n for word in stem_list[i]:\n sent_word_id[0][word_ids[word]] += 1\n\n pred_label = lr.predict(sent_word_id)\n pl_list.append(pred_label[0])\n# print(pred_label)\n# print(pl_list)\nif __name__ == ' __main__':\n for i,line in enumerate(stem_list):\n sent_word_id = [[0] * len(word_ids)]\n for word in stem_list[i]:\n sent_word_id[0][word_ids[word]] += 1\n\n# print(lr.coef_[0][i]) # 推定値、偏回帰係数\n# print(lr.intercept_) # 切片\n# print(lr.predict(sent_word_id)[0]) # 予想ラベル\n# 必要:正解ラベル、予想ラベル、予想確率 = ax + b形\n ans = pn_list\n pred_label = lr.predict(sent_word_id)\n# pred_per = sig(np.dot(weight.T,sent_word_id) + bias)\n print('correct:{}\\tpred:{}'.format(pn_list[i],pred_label[0]))\n#,pred_per[0][0]\n", "sub_path": "yohta/chapter08/knock76.py", "file_name": "knock76.py", "file_ext": "py", "file_size_in_byte": 1331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.exp", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 13, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "knock72.stem_list", "line_number": 23, "usage_type": "argument"}, {"api_name": "knock72.stem_list", "line_number": 25, "usage_type": "name"}, {"api_name": "knock72.stem_list", "line_number": 33, "usage_type": "argument"}, {"api_name": "knock72.stem_list", "line_number": 35, "usage_type": "name"}, {"api_name": "knock72.pn_list", "line_number": 42, "usage_type": "name"}, {"api_name": "knock72.pn_list", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "389349564", "text": "#-*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sklearn.qda import QDA\nfrom sklearn.metrics import accuracy_score\n\"\"\"\nQuadratic Discriminant Analysis\nMeta-parameters:\n NONE\n\"\"\"\n\ndef train_classifier(xTrain_s, yTrain_s, kwargs):\n \"\"\"\n Train a naive baise classifier on xTrain and yTrain and return the trained\n classifier\n \"\"\"\n if type(xTrain_s) != list:\n classifier_s = QDA(**kwargs)\n classifier_s.fit(xTrain_s, yTrain_s)\n\n else:\n classifier_s = train_classifier_8(xTrain_s, yTrain_s, kwargs)\n\n return classifier_s\n\ndef train_classifier_8(xsTrain_s, yTrain_s, kwargs):\n \"\"\"\n 
performs the training and returns the predictors\n \"\"\"\n # If we work with the splitted dataset:\n\n classifier_s = []\n\n for n in range(len(xsTrain_s)):\n # Training:\n classifier = train_classifier(xsTrain_s[n], yTrain_s[n], kwargs)\n classifier_s.append(classifier)\n\n return classifier_s\n\ndef predict_proba(classifier_s, dataset_s):\n \"\"\"\n Given a dataset and a classifier, compute the proba prediction\n This function can be use for validation as well as for the test.\n \"\"\"\n if type(classifier_s) != list:\n # Probability of being in each label\n proba_predicted_s = classifier_s.predict_proba(dataset_s) #[:,1]\n\n else:\n proba_predicted_s = predict_proba_8(classifier_s, dataset_s)\n\n return proba_predicted_s\n\ndef predict_proba_8(classifier_s, dataset_s):\n \"\"\"\n Predict the output of this classifier on the the dataset divided in 8 groups\n \"\"\"\n\n # If we work with the splitted dataset:\n proba_predicted_s = []\n\n for n in range(len(dataset_s)):\n proba_predicted = predict_proba(classifier_s[n], dataset_s[n])\n proba_predicted_s.append(proba_predicted)\n\n return proba_predicted_s\n\n\ndef get_classification_error(y_predicted_s, y_true_s, normalize= True):\n\n if type(y_predicted_s) == list:\n prediction_error_s = []\n\n for n in range(len(y_predicted_s)):\n prediction_error_s.append(accuracy_score(y_true_s[n],\n y_predicted_s[n],\n normalize=normalize))\n else:\n prediction_error_s = accuracy_score(y_true_s, y_predicted_s,\n normalize=normalize)\n\n return prediction_error_s\n\n\n", "sub_path": "Analyses/qda.py", "file_name": "qda.py", "file_ext": "py", "file_size_in_byte": 2386, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sklearn.qda.QDA", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 76, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "243086556", "text": "import cv2\nimport PIL.Image as Image\nimport math\nimport numpy as np\nimport time\nimport argparse\nimport yaml\nimport os\nimport csv\n\nimport torch\nfrom torchvision import models\nimport torch.nn as nn\nfrom torchvision import transforms\n\n\nimport sys\nsys.path.append('../')\nfrom common import dnn_network\n\nclass FrameInferEval:\n def __init__(self,CFG):\n print(\"Eval Frame Infer\")\n\n self.frame_infer_log_top_path = CFG[\"frame_infer_log_top_path\"]\n self.frame_infer_log_file_name = CFG[\"frame_infer_log_file_name\"]\n\n self.dataset_data_top_path = CFG[\"dataset_data_top_path\"]\n self.dataset_data_file_name = CFG[\"dataset_data_file_name\"]\n\n self.saved_log_csv_top_path = CFG[\"saved_log_csv_top_path\"]\n self.saved_log_csv_file_name = CFG[\"saved_log_csv_file_name\"]\n\n self.loop_period = CFG[\"loop_period\"]\n\n self.bookmark_list = []\n\n self.do_eval()\n self.save_result_csv()\n\n def save_result_csv(self):\n result_csv_path = os.path.join(self.saved_log_csv_top_path, self.saved_log_csv_file_name)\n csv_file = open(result_csv_path, 'w')\n csv_w = csv.writer(csv_file)\n\n for row in self.bookmark_list:\n csv_w.writerow(row)\n \n csv_file.close()\n\n def do_eval(self):\n log_path = os.path.join(self.frame_infer_log_top_path, self.frame_infer_log_file_name)\n dataset_path = os.path.join(self.dataset_data_top_path, self.dataset_data_file_name)\n\n log_list = []\n with open(log_path) as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n log_list.append(row)\n\n 
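# --- illustrative sketch, not part of the dataset records above ---
# What train_classifier/predict_proba above do for the non-list case, written
# against the modern import path: sklearn.qda was removed long ago, and QDA now
# lives in sklearn.discriminant_analysis. The toy data is made up.
import numpy as np
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA

X = np.array([[0.0, 0.0], [0.2, 0.1], [0.1, 0.3], [-0.1, 0.2],
              [1.0, 1.0], [1.2, 0.9], [0.9, 1.2], [1.1, 1.1]])
y = np.array([0, 0, 0, 0, 1, 1, 1, 1])

clf = QDA()
clf.fit(X, y)
proba = clf.predict_proba(X)                 # per-class probabilities
print((proba.argmax(axis=1) == y).mean())    # accuracy, as get_classification_error reports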
dataset_list = []\n        with open(dataset_path) as csvfile:\n            reader = csv.reader(csvfile)\n            for row in reader:\n                dataset_list.append(row)\n\n        loop_bar = zip(log_list, dataset_list)\n        \n        for row_log, row_dataset in loop_bar:\n            #pic_path = os.path.join(dataset_data_top_path, row_log[5])\n            log_pic = cv2.imread(row_log[3])\n            \n            log_x = float(row_log[0])\n            log_y = float(row_log[1])\n            log_z = float(row_log[2])\n            log_var = row_log[3]\n            log_epistemic = row_log[4]\n\n            data_x = float(row_dataset[0])/9.8\n            data_y = float(row_dataset[1])/9.8\n            data_z = float(row_dataset[2])/9.8\n\n            print(log_x, log_y, log_z)\n            print(data_x, data_y, data_z)\n\n            print(\"\\n\")\n\n            diff_x = abs(float(log_x) - float(data_x))\n            diff_y = abs(float(log_y) - float(data_y))\n            diff_z = abs(float(log_z) - float(data_z))\n\n            tmp_bookmark_list = [row_log[3], log_x, log_y, log_z, diff_x, diff_y, diff_z]\n\n            print(\"diff_x : \", diff_x)\n            print(\"diff_y : \", diff_y)\n            print(\"diff_z : \", diff_z)\n            \n            print(\"Do you want to save this picture's data? Answer y/n.\")\n            print(\"If you want to exit, press the q key\")\n\n            cv2.imshow('image_log',log_pic)\n            answer = cv2.waitKey(0)\n            \n            if answer == ord('y'):\n                self.bookmark_list.append(tmp_bookmark_list)\n                print(\"Saved picture and data\\n\")\n            elif answer == ord('q'):\n                print(\"Stop evaluation\")\n                cv2.destroyAllWindows()\n                break\n            else:\n                print(\"\\n\")\n\n        cv2.destroyAllWindows()\n        print(\"\\n\")\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(\"./eval_frame_infer.py\")\n\n    parser.add_argument(\n        '--eval_frame_infer_config', '-efic',\n        type=str,\n        required=False,\n        default='/home/ros_catkin_ws/src/dnn_attitude_predictor_with_image/config/eval_frame_infer_config.yaml',\n        help='Eval frame infer config yaml file',\n    )\n\n    FLAGS, unparsed = parser.parse_known_args()\n\n    #load yaml file\n    try:\n        print(\"Opening frame infer config file %s\", FLAGS.eval_frame_infer_config)\n        CFG = yaml.safe_load(open(FLAGS.eval_frame_infer_config, 'r'))\n    except Exception as e:\n        print(e)\n        print(\"Error opening frame infer config file %s\", FLAGS.eval_frame_infer_config)\n        quit()\n\n    frame_infer_eval = FrameInferEval(CFG)", "sub_path": "pysrc/regression/eval_frame_infer.py", "file_name": "eval_frame_infer.py", "file_ext": "py", "file_size_in_byte": 4314, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 57, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 114, "usage_type":
"call"}, {"api_name": "argparse.ArgumentParser", "line_number": 125, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "434375833", "text": "import pandas as pd\nimport numpy as np\nfrom sklearn import svm\n\nfeature_selected = ['Danceability', \n 'Energy', \n 'Speechiness', \n 'Acousticness', \n 'Instrumentalness', \n 'Liveness',\n 'Valence',\n 'Loudness',\n 'Tempo',\n 'Artist_Score']\n\n\ntrain_set = pd.read_excel('./train_set/train.xlsx')\nXtrain = np.array(train_set[feature_selected])\nYtrain = np.array(train_set['label'], dtype=float)\ntest_set = pd.read_excel('./test_set/test.xlsx')\nXtest = np.array(test_set[feature_selected])\nYtest = np.array(test_set['label'], dtype=float)\n\nclf = svm.SVC(kernel='linear')\n\nclf.fit(Xtrain, Ytrain)\n\ntrain_predict = clf.predict(Xtrain)\ntrain_accuracy = (train_predict==Ytrain).mean()\nprint(\"Train accuracy:\", train_accuracy)\n\ntest_predict = clf.predict(Xtest)\ntest_accuracy = (test_predict==Ytest).mean()\nprint(\"Test accuracy:\", test_accuracy)\n\n", "sub_path": "SVM.py", "file_name": "SVM.py", "file_ext": "py", "file_size_in_byte": 977, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.read_excel", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "487566172", "text": "#!/usr/bin/env python3\n\nimport sys\nimport nfc\n\n# PaSoRi RC-S380\n#PASORI_S380_PATH = 'usb:001:004' # usb:bus:device rerative\nPASORI_S380_PATH = 'usb:054c:06c3' # usb:vendorID abusolute identifier\n\ndef sc_from_raw(sc):\n return nfc.tag.tt3.ServiceCode(sc >> 6, sc & 0x3f)\n\ndef on_startup(targets):\n return targets\n\ndef on_connect(tag):\n print(\"[*] connected:\", tag)\n sc1 = sc_from_raw(0x200B)\n bc1 = nfc.tag.tt3.BlockCode(0, service=0)\n bc2 = nfc.tag.tt3.BlockCode(1, service=0)\n block_data = tag.read_without_encryption([sc1], [bc1, bc2])\n print(\"Student ID: \" + block_data[1:9].decode(\"utf-8\"))\n print(\"Shizudai ID: \" + block_data[24:32].decode(\"utf-8\"))\n return True\n\ndef on_release(tag):\n print(\"[*] released: \", tag)\n\ndef main(args):\n with nfc.ContactlessFrontend(PASORI_S380_PATH) as clf:\n while clf.connect(rdwr={\n 'on-startup': on_startup,\n 'on-connect': on_connect,\n 'on-release': on_release,\n }):\n pass\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "sub_path": "mysrc/copipe/orig_IDparser.py", "file_name": "orig_IDparser.py", "file_ext": "py", "file_size_in_byte": 1059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "nfc.tag.tt3.ServiceCode", "line_number": 11, "usage_type": "call"}, {"api_name": "nfc.tag", "line_number": 11, "usage_type": "attribute"}, {"api_name": "nfc.tag.tt3.BlockCode", "line_number": 19, "usage_type": "call"}, {"api_name": "nfc.tag", "line_number": 19, "usage_type": "attribute"}, {"api_name": "nfc.tag.tt3.BlockCode", "line_number": 20, "usage_type": "call"}, {"api_name": "nfc.tag", "line_number": 
20, "usage_type": "attribute"}, {"api_name": "nfc.ContactlessFrontend", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}]} +{"seq_id": "563353994", "text": "\nimport numpy as np\nimport matplotlib.pyplot as plt\ntry:\n from mpl_toolkits.mplot3d import Axes3D as _Axes3D\nexcept:\n _Axes3d = None\n\n\n\n# some useful kwarg dictionaries for different plot layouts\nkwargs_mono = dict(mc='k',\n lc='.5',\n hllc='k',\n hlmc='k',\n hlms=7,\n strlc='k')\n\n\ndef _ax_map2d_fast(ax, sensor_net, proj='default', \n m='x', mew=.5, mc='b', ms=3,):\n if hasattr(sensor_net, 'sensors'):\n sensor_net = sensor_net.sensors\n \n locs = sensor_net.getLocs2d(proj=proj)\n h = plt.plot(locs[:,0], locs[:,1], m, color=mc, ms=ms, markeredgewidth=mew)\n \n return h\n\n \ndef _ax_map2d(ax, sensor_net, proj='default', hl=[], \n labels='name', lc='k', ls=8, l_dist=.01, # labels, l colors, l size\n m='x', mew=.5, mc='b', ms=3, # marker, m edge width, m color, m size,\n strm=None, strc=None, strms=None, strlc='r', # ...same for string labels; None -> same as digits\n hlm='*', hlmc='r', hlms=5, hllc='r'): # ...same for highlight\n # in case sensor_net parent is submitted\n if hasattr(sensor_net, 'sensors'):\n sensor_net = sensor_net.sensors\n \n if strm == None:\n strm = m\n if strc == None:\n strmc = mc\n if strms == None:\n strms = ms\n if strlc == None:\n strlc = lc\n \n ax.set_aspect('equal')\n ax.set_frame_on(False)\n ax.set_axis_off()\n \n locs = sensor_net.getLocs2d(proj=proj)\n # labels\n# if labels == 'name':\n# labels = []\n# markers\n# colorList = []\n# for s in sensor_net:\n# label = s.name\n# if label.isdigit():\n# label = r'$'+label+'$'\n# colorList.append([])\n# else:\n# colorList.append('r')\n# labels.append(label)\n# elif labels == 'id':\n# labels = range(sensor_net.n)\n# colorList = ['k'] * len(labels)\n# elif labels== 'legend':\n# separator=':'\n# labels = [r\"$%s$%s%s\"%(i, separator, s.name) for i, s \\\n# in enumerate(sensor_net) ]\n# colorList = ['k']*len(labels)\n# else:\n# colorList = labels = [None]*sensor_net.n\n# # markers\n# markers = np.array([marker] * sensor_net.n, dtype='S2')\n# markers[highlight] = highlightMarker\n #transOffset = plt.offset_copy(plt.gca().transData, fig=fig, x = 0.05, y=0.10, units='inches')\n for i in range(sensor_net.n):\n x = locs[i,0]\n y = locs[i,1]\n # label\n if labels is None:\n label = None\n elif labels == 'id':\n label = label_for_c = str(i)\n elif labels == 'legend':\n separator=':'\n label_for_c = sensor_net.names[i]\n label = r\"$%s$%s%s\"%(i, separator, label_for_c)\n else:\n label = label_for_c = sensor_net.names[i]\n # properties\n if i in hl:\n marker, marker_c, marker_s, label_c, label_s = hlm, hlmc, hlms, hllc, ls\n elif (label!=None) and label_for_c.isdigit():\n marker, marker_c, marker_s, label_c, label_s = m, mc, ms, lc, ls\n else:\n marker, marker_c, marker_s, label_c, label_s = strm, strmc, strms, strlc, ls\n plt.plot([x],[y], marker, color=marker_c, ms=marker_s, markeredgewidth=mew)#,label=label)\n if label != None:\n plt.text(x, y+l_dist, label, fontsize=label_s,# style='oblique', \n horizontalalignment='center', verticalalignment='bottom', \n color=label_c)\n\n\n\ndef map2d(sensor_net, figsize=(5,5), frame=.01, **kwargs):\n \"\"\"\n Arguments\n ---------\n \n ax: mpl.axes or ``None``\n target axes; a new fiigure is created if ``None``\n \n figsize:\n mpl figsize\n \n highlight: = []\n sensors which should be highlighted \n \n labels: \n how the sensors should be labelled: ``'name'``, 
``'id'``, ``'legend'`` \n        (names and id), ``None``. Labels can be customized with the following \n        additional arguments: ``lc='k'`` (label color), ``ls=8`` (label \n        font size), and ``l_dist`` (distance from the marker).\n    \n    markers: \n        markers can be customized with the following arguments: ``m='x'`` \n        (marker symbol), ``mc='b'`` (color), ``ms=3`` (size) and ``mew=0.5`` \n        (marker edge width).\n    \n    proj:\n        Transform to apply to 3 dimensional sensor coordinates for plotting \n        locations in a plane\n    \n    \"\"\" \n    # figure\n    fig = plt.figure(figsize=figsize, facecolor='w')\n    ax = plt.axes([frame, frame, 1 - 2 * frame, 1 - 2 * frame])\n    # the following does not make the plot\n#    fig = mpl.figure.Figure(figsize=figsize, facecolor='w')\n#    ax = fig.add_axes([0,0,1,1])\n    _ax_map2d(ax, sensor_net, **kwargs)\n    \n    return fig\n\n\n\n\ndef map3d(sensor_net, marker='c*', labels=False, headBall=0):\n    \"\"\"not very helpful...\"\"\"\n    if _Axes3D is None:\n        raise ImportError(\"mpl_toolkits.mplot3d.Axes3D could not be imported\")\n    \n    if hasattr(sensor_net, 'sensors'):\n        sensor_net = sensor_net.sensors\n    locs = sensor_net.locs3d\n    fig = plt.gcf()\n    ax = _Axes3D(fig)\n    ax.scatter(locs[:,0], locs[:,1], locs[:,2])\n    # plot head ball\n    if headBall>0:\n        u = np.linspace(0, 1 * np.pi, 10)\n        v = np.linspace(0, np.pi, 10)\n        \n        x = 5 * headBall * np.outer( np.cos(u), np.sin(v))\n        z = 10 * (headBall * np.outer( np.sin(u), np.sin(v)) -.5) # vertical\n        y = 5 * headBall * np.outer( np.ones(np.size(u)), np.cos(v)) # axis of the sphere\n        ax.plot_surface(x, y, z, rstride=1, cstride=1, color='w')\n    #n = 100\n    #for c, zl, zh in [('r', -50, -25), ('b', -30, -5)]:\n    #xs, ys, zs = zip(*\n    #               [(random.randrange(23, 32),\n    #                 random.randrange(100),\n    #                 random.randrange(zl, zh)\n    #                 ) for i in range(n)])\n    #ax.scatter(xs, ys, zs, c=c)\n", "sub_path": "eelbrain/plot/sensors.py", "file_name": "sensors.py", "file_ext": "py", "file_size_in_byte": 6037, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "mpl_toolkits.mplot3d.Axes3D", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "mpl_toolkits.mplot3d.Axes3D", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 167, "usage_type": "attribute"}, {"api_name": "numpy.outer", "line_number": 169, "usage_type": "call"}, {"api_name":
"numpy.cos", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "525073202", "text": "from django.shortcuts import render,redirect\nfrom django.contrib import messages\nfrom.models import Contacts\nfrom django.core.mail import send_mail\n\n\ndef contact(request):\n if request.method == \"POST\":\n name = request.POST['name']\n listing_id = request.POST['listing_id']\n listing = request.POST['listing']\n email = request.POST['email']\n phone = request.POST['phone']\n message = request.POST['message']\n id_user = request.POST['id_user']\n realtor_email = request.POST['realtor_email']\n\n if request.user.is_authenticated:\n id_user = request.user.id\n has_contactred = Contacts.objects.all().filter(id_user=id_user,listing_id=listing_id)\n if has_contactred:\n messages.error(request,'You have alredy made')\n return redirect('listing')\n\n\n contact =Contacts(listing=listing,listing_id=listing_id,name=name,phone=phone,email=email,message=message,id_user=id_user,realtor_email=realtor_email)\n\n contact.save()\n send_mail('property','there aye','richard.black96@mail.ru',[realtor_email,'techgueinfo@mail.ru'],fail_silently=False)\n messages.success(request,'Yourrequest has beensubmited')\n return redirect('listings')\n", "sub_path": "btre/contacts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "models.Contacts.objects.all", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Contacts.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Contacts", "line_number": 20, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Contacts", "line_number": 26, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 30, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "39053513", "text": "from ChallengeClient import challengeinterface\r\nimport base64\r\n\r\n#############################################################\r\n# Function declarations\r\n#############################################################\r\nC_UPPER = range(ord('A'),ord('Z')+1) #goddammed +1 is required: fucked me up for an hour >_<\r\nC_LOWER = range(ord('a'),ord('z')+1)\r\n\r\n#rot-n function\r\ndef rot_n(n, msg):\r\n decoded = ''\r\n for c in msg:\r\n rotted = ord(c) + n\r\n if ord(c) in C_UPPER:\r\n if rotted > ord('Z'):\r\n rotted = (rotted - ord('Z')) + ord('A') - 1\r\n decoded += chr(rotted)\r\n else:\r\n decoded += chr(rotted)\r\n elif ord(c) in C_LOWER:\r\n 
if rotted > ord('z'):\r\n rotted = (rotted - ord('z')) + ord('a') - 1\r\n decoded += chr(rotted)\r\n else:\r\n decoded += chr(rotted)\r\n else:\r\n decoded += c\r\n\r\n return decoded\r\n\r\ndef all_casings(s):\r\n if not s:\r\n yield \"\"\r\n else:\r\n first = s[:1]\r\n if first.lower() == first.upper():\r\n for sub_casing in all_casings(s[1:]):\r\n yield first + sub_casing\r\n else:\r\n for sub_casing in all_casings(s[1:]):\r\n yield first.lower() + sub_casing\r\n yield first.upper() + sub_casing\r\n\r\n# select_rline\r\n# Takes the full challenge text as input and trims it down to\r\n# the line that you input, counting from the end of the string\r\n# e.g. if you input line=2, it will return the second last line\r\ndef select_rline(fulltext, rline):\r\n lines = fulltext.rsplit(\"\\n\")\r\n problemtext = lines[len(lines) - rline]\r\n return problemtext\r\n\r\n# solve_problem\r\n# Solve the problem in this function\r\ndef solve_problem(problemtext):\r\n # split string into fragments\r\n # 4 b64 characters represents 3 ascii characters\r\n i = 0\r\n frag = []\r\n while (i < len(problemtext)):\r\n frag.append(problemtext[i:i + 4])\r\n i += 4\r\n\r\n possible_rots = []\r\n for n in range(0, 26):\r\n possible_rots.append(n)\r\n\r\n found = {}\r\n for f in frag:\r\n perms = all_casings(f)\r\n #print(\"frag\",f)\r\n breakout = False\r\n for p in perms:\r\n if (breakout):\r\n breakout = False\r\n break\r\n #print(\"frag\",f,\"perm\",p)\r\n for n in possible_rots:\r\n new_r = rot_n(n, p) + \"===\"\r\n new_b = base64.b64decode(new_r)\r\n try:\r\n new_s = new_b.decode('ascii')\r\n good = True\r\n for c in new_s:\r\n if (not (ord(c) in C_UPPER or ord(c) == ord(\" \"))):\r\n good = False\r\n #print(c, ord(c), \"is not a good char\")\r\n break\r\n #else:\r\n #print(c,ord(c),\"is a good char\")\r\n if (good):\r\n #print(p, n, \"->\", new_r, \"=>\", new_s)\r\n if (n not in found):\r\n found[n] = []\r\n found[n].append(p)\r\n #breakout = True\r\n #break\r\n except UnicodeDecodeError:\r\n #print(\"decode error\",n,new_r)\r\n continue\r\n\r\n #print(found)\r\n #possible_rots = [*found.keys()]\r\n #print(possible_rots)\r\n\r\n #find longest array in found, which will give us the rot\r\n maxlen = 0\r\n index = -1\r\n for i in found:\r\n if (len(found[i]) > maxlen):\r\n maxlen = len(found[i])\r\n index = i\r\n coded = rot_n(index, \"\".join(found[index]))\r\n coded2 = base64.b64decode(coded)\r\n answer = coded2.decode('ascii')\r\n return answer\r\n\r\n#############################################################\r\n# Main code starts here\r\nif __name__ == \"__main__\":\r\n level = '6'\r\n serverip = \"15.223.13.29\"\r\n challengeport = 8001\r\n\r\n # start the challenge game\r\n challenge = challengeinterface(serverip, challengeport)\r\n print(challenge.start())\r\n\r\n # choose the level to run\r\n challengetext = challenge.select_level(level)\r\n print('\\nChallenge Text is:\\n' + challengetext)\r\n\r\n # trim the text down to the problem statement\r\n problemtext = select_rline(challengetext, 2)\r\n print('\\nProblem Text is:\\n' + problemtext)\r\n\r\n # solve the problem\r\n solution = solve_problem(problemtext)\r\n print('\\nYour solution is:\\n' + solution)\r\n\r\n # submit the answer\r\n result = challenge.submit_answer(solution)\r\n print('\\n Result is:\\n' + result)\r\n\r\n # close the socket at the end of the program\r\n challenge.exit()", "sub_path": "Solutions/6.py", "file_name": "6.py", "file_ext": "py", "file_size_in_byte": 4754, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "base64.b64decode", "line_number": 81, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 115, "usage_type": "call"}, {"api_name": "ChallengeClient.challengeinterface", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "561142280", "text": "import math\nimport numpy as np\nfrom scipy.misc import imread, imresize\n\ndef read_images(image_paths):\n images = np.empty([image_paths.shape[0], 160, 320, 3])\n\n for i, path in enumerate(image_paths):\n images[i] = imread('data/'+path)\n\n return images\n\n\ndef preprocess(images):\n shape = (200, 66, 3)\n height, width, channels = shape\n images_resized = np.empty([images.shape[0], height, width, channels])\n for i, img in enumerate(images):\n images_resized[i] = imresize(img, shape)\n\n images = images_resized\n\n return images\n\n\ndef augment(images, angles):\n new_images = np.empty_like(images)\n new_angles = np.empty_like(angles)\n for i, (img, angle) in enumerate(zip(images, angles)):\n if np.random.choice(2):\n new_images[i] = np.fliplr(img)\n new_angles[i] = angle * -1\n else:\n new_images[i] = img\n new_angles[i] = angle\n\n images = new_images\n angles = new_angles\n\n return images, angles\n\n\ndef get_samples_per_epoch(array_size, batch_size):\n num_batches = array_size / batch_size\n # return value must be a number than can be divided by batch_size\n samples_per_epoch = math.ceil((num_batches / batch_size) * batch_size)\n samples_per_epoch = samples_per_epoch * batch_size\n return samples_per_epoch\n\n\ndef get_batch(images, angles, batch_size):\n\n samples = len(images)\n\n while True:\n selected = np.random.choice(samples, batch_size)\n images_batch, angles_batch = read_images(images[selected]), angles[selected].astype(float)\n\n images_batch, angles_batch = augment(preprocess(images_batch), angles_batch)\n\n yield images_batch, angles_batch", "sub_path": "data_generator.py", "file_name": "data_generator.py", "file_ext": "py", "file_size_in_byte": 1694, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.empty", "line_number": 6, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 17, "usage_type": "call"}, {"api_name": "scipy.misc.imresize", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.fliplr", "line_number": 31, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "622577438", "text": "from pygame.sprite import Group\r\nimport pygame, sys\r\nimport game_function as g_f\r\nfrom background import Background\r\nfrom ship import Ship\r\nfrom settings import Settings\r\ndef init_game():\r\n pygame.init()\r\n game_settings = Settings()\r\n screan = pygame.display.set_mode((game_settings.screan_width, game_settings.screan_height))\r\n ship=Ship(screan)\r\n bullets=Group()\r\n aliens=Group()\r\n g_f.create_fleet(game_settings,screan,aliens,ship)\r\n 
background=Background(screan)\r\n\r\n pygame.display.set_caption(\"Dota 3\")\r\n while True:\r\n g_f.check_events(game_settings,screan,ship,bullets)\r\n g_f.update_screan(background,ship,bullets,aliens)\r\n ship.update()\r\n bullets.update()\r\n\r\n for bullet in bullets.copy():\r\n if bullet.rect.bottom<=0:\r\n bullets.remove(bullet)\r\n\r\ninit_game()", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 855, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pygame.init", "line_number": 8, "usage_type": "call"}, {"api_name": "settings.Settings", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "ship.Ship", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 13, "usage_type": "call"}, {"api_name": "game_function.create_fleet", "line_number": 14, "usage_type": "call"}, {"api_name": "background.Background", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 17, "usage_type": "attribute"}, {"api_name": "game_function.check_events", "line_number": 19, "usage_type": "call"}, {"api_name": "game_function.update_screan", "line_number": 20, "usage_type": "call"}, {"api_name": "ship.update", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "150714480", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# This is easiest to run with helper script ./tools/pytest.sh\n\n\n__author__ = 'jtmoon79'\n__doc__ = \\\n \"\"\"Test the goto_http_redirect_server project using pytest.\"\"\"\n\nfrom collections import defaultdict\nfrom datetime import datetime\nimport getpass\nimport http\nfrom http import client\nimport threading\nimport time\nimport typing\nfrom urllib.parse import ParseResult\n\nimport pytest\n\nimport goto_http_redirect_server\nfrom goto_http_redirect_server.goto_http_redirect_server import (\n Re_User,\n Re_Date,\n Re_Entry,\n Re_EntryType,\n Re_EntryKey,\n Re_Entry_Dict,\n FromTo_List,\n REDIRECT_PATHS_NOT_ALLOWED,\n REDIRECT_CODE_DEFAULT,\n html_escape,\n html_a,\n htmls,\n print_debug,\n fromisoformat,\n to_ParseResult,\n redirect_handler_factory,\n RedirectHandler,\n RedirectServer,\n RedirectsLoader,\n)\nstr_None = typing.Optional[str]\n\n# override for comparisons of datetime.now() generated values\nNOW = datetime.now().replace(microsecond=0)\ngoto_http_redirect_server.goto_http_redirect_server.DATETIME_START = NOW\ngoto_http_redirect_server.goto_http_redirect_server.datetime_now = lambda: NOW\n# need something different than NOW\nLATER = datetime.now()\nLATER = LATER.replace(second=(LATER.second + 1 if LATER.second < 59 else 0))\n\nUSER = getpass.getuser()\n\n# shorten some names for clarity\ntopr = to_ParseResult\nET = Re_EntryType\n\n\n# all committed test resources should be under this directory\n#resources = Path.joinpath(Path(__file__).parent, 'test_resources')\n\n\ndef pr(**kwargs):\n \"\"\"create a ParseResult, sets unset parameters to empty string\"\"\"\n args = defaultdict(str, kwargs)\n return ParseResult(\n scheme=args['scheme'],\n netloc=args['netloc'],\n path=args['path'],\n params=args['params'],\n query=args['query'],\n fragment=args['fragment'],\n 
)\n\n\nclass Test_ClassesSimple(object):\n \"\"\"basic building-block classes\"\"\"\n\n @pytest.mark.parametrize(\n 'entry_args, entry_kwargs,'\n 'entry_expected, raises',\n (\n # basic error case\n pytest.param((), {},\n None, ValueError),\n # basic happy path\n pytest.param(('a', 'b'), {},\n Re_Entry('a', 'b'), None),\n # different Re_EntryType\n pytest.param(('a', 'b'), {},\n Re_Entry('a', 'b', USER, NOW, topr('a'), topr('b'), ET._), None),\n pytest.param(('a;', 'b'), {},\n Re_Entry('a;', 'b', USER, NOW, topr('a;'), topr('b'), ET._P), None),\n pytest.param(('a;?', 'b'), {},\n Re_Entry('a;?', 'b', USER, NOW, topr('a;?'), topr('b'), ET._PQ), None),\n pytest.param(('a?', 'b'), {},\n Re_Entry('a?', 'b', USER, NOW, topr('a?'), topr('b'), ET._Q), None),\n # different args\n pytest.param(('a', 'b', 'u3'), {},\n Re_Entry('a', 'b', 'u3', NOW, topr('a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER), {},\n Re_Entry('a', 'b', 'u3', LATER, topr('a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a')), {},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b')), {},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), {},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n # different kwargs\n pytest.param(('a', 'b'), {'user': 'u3'},\n Re_Entry('a', 'b', 'u3', NOW, topr('a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3'), {'date': LATER},\n Re_Entry('a', 'b', 'u3', LATER, topr('a'), topr('b'), ET._), None),\n pytest.param(('a', 'b'), {'user': 'u3', 'date': LATER},\n Re_Entry('a', 'b', 'u3', LATER, topr('a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER), {'from_pr': topr('NOT a')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3'), {'date': LATER, 'from_pr': topr('NOT a')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('b'), ET._), None),\n pytest.param(('a', 'b'), {'user': 'u3', 'date': LATER, 'from_pr': topr('NOT a')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a')), {'to_pr': topr('NOT b')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER), {'from_pr': topr('NOT a'), 'to_pr': topr('NOT b')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._), None),\n pytest.param(('a', 'b', 'u3'), {'date': LATER, 'from_pr': topr('NOT a'), 'to_pr': topr('NOT b')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._), None),\n pytest.param(('a', 'b'), {'user': 'u3', 'date': LATER, 'from_pr': topr('NOT a'), 'to_pr' :topr('NOT b')},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b')), {'etype': ET._P},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n pytest.param(('a', 'b', 'u3', LATER, topr('NOT a')), {'to_pr': topr('NOT b'), 'etype': ET._P},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n pytest.param(('a', 'b', 'u3', LATER), {'from_pr': topr('NOT a'), 'to_pr': topr('NOT b'), 'etype': ET._P},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n pytest.param(('a', 'b', 'u3'), {'date': LATER, 
'from_pr': topr('NOT a'), 'to_pr': topr('NOT b'), 'etype': ET._P},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n pytest.param(('a', 'b'), {'user': 'u3', 'date': LATER, 'from_pr': topr('NOT a'), 'to_pr': topr('NOT b'), 'etype': ET._P},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n # all kwargs\n pytest.param((), {'from_': 'a', 'to': 'b', 'user': 'u3', 'date': LATER, 'from_pr': topr('NOT a'), 'to_pr': topr('NOT b'), 'etype': ET._P},\n Re_Entry('a', 'b', 'u3', LATER, topr('NOT a'), topr('NOT b'), ET._P), None),\n )\n )\n def test_Re_Entry(self,\n entry_args,\n entry_kwargs,\n entry_expected,\n raises):\n if raises:\n with pytest.raises(raises):\n Re_Entry(*entry_args, **entry_kwargs)\n else:\n entry = Re_Entry(*entry_args, **entry_kwargs)\n assert entry == entry_expected\n\n\nclass Test_Functions(object):\n\n @pytest.mark.parametrize(\n 's_, expected',\n (\n pytest.param('', htmls(''),),\n pytest.param('A', htmls('A'),),\n pytest.param('&', htmls('&'),),\n pytest.param('<>', htmls('<>'),),\n pytest.param('foo\\nbar', htmls('foo
\\nbar'),),\n )\n )\n def test_html_escape(self, s_: str, expected: htmls):\n actual = html_escape(s_)\n assert expected == actual\n assert type(actual) == type(expected)\n\n @pytest.mark.parametrize(\n 'href, text, expected',\n (\n pytest.param('', None, ''),\n pytest.param('', '', ''),\n pytest.param('ABC', None, 'ABC'),\n pytest.param('ABC', '', ''),\n pytest.param('ABC', '123', '123'),\n pytest.param('<>', '<>', '\"><>'),\n )\n )\n def test_html_a(self,\n href: str,\n text: str_None,\n expected: str):\n actual = html_a(href, text)\n assert actual == expected\n\n @pytest.mark.parametrize(\n 'dts, expected',\n (\n # these two cases will differ from Python 3.5 and subsequent Python versions\n #pytest.param('2001-01-02 03 04 05', datetime(year=2001, month=1, day=2, hour=3, minute=4, second=5)),\n #pytest.param('2002/01/02 03:04:05', datetime(year=2002, month=1, day=2, hour=3, minute=4, second=5)),\n pytest.param('2003-01-02 03:04:05', datetime(year=2003, month=1, day=2, hour=3, minute=4, second=5)),\n pytest.param('2004-01-02T03:04:05', datetime(year=2004, month=1, day=2, hour=3, minute=4, second=5)),\n pytest.param('BAD STRING', NOW),\n )\n )\n def test_fromisoformat(self,\n dts: str,\n expected: datetime):\n actual = fromisoformat(dts)\n assert actual == expected\n\n @pytest.mark.parametrize(\n 'pr1, pr2, expected',\n (\n pytest.param(pr(path='a'), pr(path='a'), True),\n pytest.param(pr(path='a'), pr(path='a', query='b'), True),\n pytest.param(pr(path='a'), pr(path='b'), False),\n pytest.param(pr(query='a'), pr(path='b', query='a'), False),\n )\n )\n def test_query_match(self,\n pr1: ParseResult,\n pr2: ParseResult,\n expected: bool):\n assert RedirectHandler.query_match(pr1, pr2) is expected\n\n @pytest.mark.parametrize(\n 'ppq, ppqpr,'\n 'redirects,'\n 'entry',\n (\n pytest.param(\n '/a0', pr(path='/a0'),\n {'/a0': Re_Entry('/a0', '/b')},\n Re_Entry('/a0', '/b')\n ),\n pytest.param(\n '/a1', pr(path='/a1'),\n {'/b': Re_Entry('/a1', '/b')},\n None,\n ),\n pytest.param(\n '/a2', pr(path='/a2'),\n {'/a2': Re_Entry('/a2', '/b'), '/a2;': Re_Entry('/a2;', '/b')},\n Re_Entry('/a2', '/b'),\n ),\n pytest.param(\n '/a3', pr(path='/a3'),\n {'/a3;': Re_Entry('/a3;', '/b'), '/a3;?': Re_Entry('/a3;?', '/b'), '/a3?': Re_Entry('/a3?', '/b'), '/a3': Re_Entry('/a3', '/b')},\n Re_Entry('/a3', '/b'),\n ),\n pytest.param(\n '/a4', pr(path='/a4'),\n {'/a4;': Re_Entry('/a4;', '/b'), '/a4?': Re_Entry('/a4?', '/b'), '/a4': Re_Entry('/a4', '/b'), '/a4;?': Re_Entry('/a4;?', '/b')},\n Re_Entry('/a4', '/b'),\n ),\n pytest.param(\n '/a5;c', pr(path='/a5', params='c'),\n {'/a5': Re_Entry('/a5', '/b'), '/a5;': Re_Entry('/a5;', '/b')},\n Re_Entry('/a5;', '/b'),\n ),\n pytest.param(\n '/a?00', pr(path='/a', query='00'),\n {'/a;': Re_Entry('/a;', '/b'), '/a;?': Re_Entry('/a;?', '/b')},\n None,\n ),\n pytest.param(\n '/a?01', pr(path='/a', query='01'),\n {'/a': Re_Entry('/a', '/b'), '/a;': Re_Entry('/a;', '/b')},\n Re_Entry('/a', '/b'),\n ),\n pytest.param(\n '/a;02', pr(path='/a', params='02'),\n {'/a': Re_Entry('/a', '/b'), '/a?': Re_Entry('/a?', '/b')},\n Re_Entry('/a', '/b'),\n ),\n pytest.param(\n '/a;03', pr(path='/a', params='03'),\n {'/a;?': Re_Entry('/a;?', '/b'), '/a?': Re_Entry('/a?', '/b')},\n None,\n ),\n pytest.param(\n '/a?04', pr(path='/a', query='04'),\n {'/a;': Re_Entry('/a;', '/b'), '/a?': Re_Entry('/a?', '/b')},\n Re_Entry('/a?', '/b'),\n ),\n pytest.param(\n '/a?05', pr(path='/a', query='05'),\n {'/a;': Re_Entry('/a;', '/b'), '/a;?': Re_Entry('/a;?', '/b')},\n None,\n ),\n pytest.param(\n '/a?06', 
pr(path='/a', query='06'),\n {'/a;': Re_Entry('/a;', '/b'), '/a;?': Re_Entry('/a;?', '/b'), '/a?': Re_Entry('/a?', '/b')},\n Re_Entry('/a?', '/b'),\n ),\n pytest.param(\n '/a?07', pr(path='/a', query='07'),\n {'/a;': Re_Entry('/a;', '/b'), '/a;?': Re_Entry('/a;?', '/b'), '/a?': Re_Entry('/a?', '/b'), '/a': Re_Entry('/a', '/b')},\n Re_Entry('/a?', '/b'),\n ),\n # XXX: Disable Path Required Request Modifier\n # with paths\n # pytest.param(\n # '/d/path?00', pr(path='/d/path', query='00'),\n # {'/d;': Re_Entry('/d;', '/b'), '/d;?': Re_Entry('/d;?', '/b')},\n # None,\n # ),\n # pytest.param(\n # '/d/path?01', pr(path='/d/path', query='01'),\n # {'/d': Re_Entry('/d', '/b'), '/d/?': Re_Entry('/d/?', '/b')},\n # Re_Entry('/d/?', '/b'),\n # ),\n # pytest.param(\n # '/d;02', pr(path='/d', params='02'),\n # {'/d': Re_Entry('/d', '/b'), '/d?': Re_Entry('/d?', '/b')},\n # Re_Entry('/d', '/b'),\n # ),\n # pytest.param(\n # '/d;03', pr(path='/d', params='03'),\n # {'/d;?': Re_Entry('/d;?', '/b'), '/d?': Re_Entry('/d?', '/b')},\n # None,\n # ),\n # pytest.param(\n # '/d?04', pr(path='/d', query='04'),\n # {'/d;': Re_Entry('/d;', '/b'), '/d?': Re_Entry('/d?', '/b')},\n # Re_Entry('/d?', '/b'),\n # ),\n # pytest.param(\n # '/d?05', pr(path='/d', query='05'),\n # {'/d;': Re_Entry('/d;', '/b'), '/d;?': Re_Entry('/d;?', '/b')},\n # None,\n # ),\n # pytest.param(\n # '/d?06', pr(path='/d', query='06'),\n # {'/d;': Re_Entry('/d;', '/b'), '/d;?': Re_Entry('/d;?', '/b'), '/d?': Re_Entry('/d?', '/b')},\n # Re_Entry('/d?', '/b'),\n # ),\n # pytest.param(\n # '/d?07', pr(path='/d', query='07'),\n # {'/d;': Re_Entry('/d;', '/b'), '/d;?': Re_Entry('/d;?', '/b'), '/d?': Re_Entry('/d?', '/b'), '/d': Re_Entry('/d', '/b')},\n # Re_Entry('/d?', '/b'),\n # ),\n )\n )\n def test_query_match_finder(self,\n ppq: str, ppqpr: ParseResult,\n redirects: Re_Entry_Dict,\n entry: Re_Entry):\n assert RedirectHandler.query_match_finder(\n ppq, ppqpr,\n redirects) == entry\n\n @pytest.mark.parametrize(\n 'pr1,'\n 'pr2,'\n 'expected',\n (\n # URI component parts\n # https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlparse\n #\n # empty test cases\n pytest.param(\n pr(),\n pr(),\n '',\n id='(empty)'\n ),\n pytest.param(\n pr(scheme='http'),\n pr(scheme='http'),\n r'http://',\n id='scheme http'\n ),\n pytest.param(\n pr(scheme='https'),\n pr(scheme='http'),\n r'https://',\n id='scheme pr2'\n ),\n pytest.param(\n pr(scheme='https', netloc='a', path='b', params='c', query='d', fragment='e'),\n pr(),\n r'https://a/b;c?d#e',\n id='pr1 only'\n ),\n pytest.param(\n pr(),\n pr(scheme='https', netloc='a', path='b', params='c', query='d', fragment='e'),\n r';c?d#e',\n id='pr2 only'\n ),\n pytest.param(\n pr(),\n pr(scheme='https', netloc='a', path='b', params='c', query='d', fragment='e'),\n r';c?d#e',\n id='pr2 only'\n ),\n # precedence test cases\n pytest.param(\n pr(scheme='ftp', netloc='a1'),\n pr(scheme='ftp', netloc='a2'),\n r'ftp://a1',\n id='pr1.netloc'\n ),\n pytest.param(\n pr(scheme='ftp', netloc='a1', path='b1'),\n pr(scheme='ftp', netloc='a2', path='b2'),\n r'ftp://a1/b1',\n id='pr1.netloc pr1.path'\n ),\n pytest.param(\n pr(scheme='ftp', netloc='a1', query='d1'),\n pr(scheme='ftp', netloc='a2', query='d2'),\n r'ftp://a1?d1&d2',\n id='pr1.netloc pr1&2.query'\n ),\n pytest.param(\n pr(scheme='ftp', netloc='a1', fragment='f1'),\n pr(scheme='ftp', fragment='f2'),\n r'ftp://a1#f2',\n id='pr2.fragment'\n ),\n # Template Syntax basic test cases\n pytest.param(\n pr(netloc='a1', path='p1_${path}'),\n pr(path='p2'),\n 
r'//a1/p1_p2',\n id='Template Syntax: pr1.path \"p1_${path}\"'\n ),\n pytest.param(\n pr(netloc='a1', path='p1_${params}'),\n pr(params='r2'),\n r'//a1/p1_r2',\n id='Template Syntax: pr1.path \"p1_${params}\"'\n ),\n pytest.param(\n pr(netloc='a1', path='p1_${query}'),\n pr(query='q2'),\n r'//a1/p1_q2',\n id='Template Syntax: pr1.path \"p1_${query}\"'\n ),\n pytest.param(\n pr(netloc='a1', path='p1_${fragment}'),\n pr(fragment='f2'),\n r'//a1/p1_f2',\n id='Template Syntax: pr1.path \"p1_${fragment}\"'\n ),\n pytest.param(\n pr(netloc='a1', params='r1_${path}'),\n pr(path='p2'),\n r'//a1/;r1_p2',\n id='Template Syntax: pr1.params \"r1_${path}\"'\n ),\n pytest.param(\n pr(netloc='a1', query='q1_${path}'),\n pr(path='p2'),\n r'//a1?q1_p2',\n id='Template Syntax: pr1.query \"q1_${path}\"'\n ),\n pytest.param(\n pr(netloc='a1', fragment='f1_${path}'),\n pr(path='p2'),\n r'//a1#f1_p2',\n id='Template Syntax: pr1.fragment \"f1_${path}\"'\n ),\n # Template Syntax complex test cases\n # consuming ${path}\n # XXX: these are the odd behaviors of current implementation\n pytest.param(\n pr(netloc='a1', query='q1_${path}', fragment='f1_${path}'),\n pr(path='p2'),\n r'//a1?q1_p2#f1_path',\n id='Template Syntax1: consume ${path}'\n ),\n pytest.param(\n pr(netloc='a1_${path}', query='q1_${path}', fragment='f1'),\n pr(path='p2'),\n r'//a1_p2?q1_path#f1',\n id='Template Syntax2: consume ${path}'\n ),\n pytest.param(\n pr(netloc='a1', params='prm1', query='q1_${path}', fragment='f1'),\n pr(path='p2', params='prm2'),\n r'//a1/;prm1;prm2?q1_p2#f1',\n id='Template Syntax3: consume ${path}'\n ),\n pytest.param(\n pr(netloc='a1', query='q1_${query}', fragment='f1_${query}'),\n pr(path='p2'),\n r'//a1?q1_#f1_query',\n id='Template Syntax4: consume ${query}'\n ),\n pytest.param(\n pr(netloc='a1_${query}', query='q1_${query}', fragment='f1'),\n pr(path='p2'),\n r'//a1_?q1_query#f1',\n id='Template Syntax5: consume ${query}'\n ),\n pytest.param(\n pr(netloc='a1', params='prm1', query='q1_${query}', fragment='f1'),\n pr(path='p2', params='prm2', query='q2'),\n r'//a1/;prm1;prm2?q1_q2#f1',\n id='Template Syntax6: consume ${query}'\n ),\n )\n )\n def test_combine_parseresult(self,\n pr1: ParseResult,\n pr2: ParseResult,\n expected: str):\n actual = RedirectHandler.combine_parseresult(pr1, pr2)\n assert actual == expected\n\n @pytest.mark.parametrize(\n 'mesg, end',\n (\n pytest.param('', None),\n pytest.param('', ''),\n pytest.param('A', None),\n pytest.param('B', ''),\n pytest.param('C', '\\n'),\n )\n )\n def test_print_debug(self,\n mesg: str,\n end: str):\n print_debug(mesg, end=end)\n\n @pytest.mark.parametrize(\n 'href, text, expected',\n (\n pytest.param('', None, ''),\n pytest.param('', '', ''),\n pytest.param('ABC', None, 'ABC'),\n pytest.param('ABC', '', ''),\n pytest.param('ABC', '123', '123'),\n pytest.param('<>', '<>', '\"><>'),\n )\n )\n def test_html_a(self,\n href,\n text,\n expected):\n actual = html_a(href, text)\n assert actual == expected\n\n @pytest.mark.parametrize(\n 'from_to, expected',\n (\n pytest.param(\n [('a', 'b',)], {'a': Re_Entry('a', 'b')},\n # TODO: add more!\n ),\n )\n )\n def test_load_redirects_fromto(self,\n from_to: FromTo_List,\n expected: Re_Entry_Dict):\n actual = RedirectsLoader.load_redirects_fromto(from_to)\n assert actual == expected\n\n @pytest.mark.parametrize(\n 'input_, expected',\n (\n # simply happy path\n pytest.param(\n {'a': Re_Entry('a', 'b')},\n {'a': Re_Entry('a', 'b')},\n ),\n # reserved path\n pytest.param(\n {REDIRECT_PATHS_NOT_ALLOWED[0]: 
Re_Entry(REDIRECT_PATHS_NOT_ALLOWED[0], 'b')},\n {},\n ),\n # encoding not allowed\n pytest.param(\n {'a': Re_Entry('a', r'混沌')},\n {},\n ),\n # encoding allowed in `to` field\n pytest.param(\n {r'混沌': Re_Entry(r'混沌', 'b')},\n {r'混沌': Re_Entry(r'混沌', 'b')},\n ),\n )\n )\n def test_clean_redirects(self,\n input_: Re_Entry_Dict,\n expected: Re_Entry_Dict):\n actual = RedirectsLoader.clean_redirects(input_)\n assert actual == expected\n\n\nIP = '127.0.0.3'\nPORT = 33797 # an unlikely port to be used\nENTRY_LIST = {'/a': ('b', USER, NOW)}\n\n\ndef port() -> int:\n \"\"\"\n Use a new port for each new RedirectServer instance.\n\n Some CI Services images tend to keep the port open after it's use. This\n means a new RedirectServer will raise\n OSError: [Errno 98] Address already in use\n This also implies it's difficult to search for an unused port because\n that would require testing if the port can be opened.\n This is good enough.\n \"\"\"\n global PORT\n PORT += 1\n return PORT\n\n\ndef new_redirect_handler(redirects: Re_Entry_Dict) \\\n -> RedirectHandler:\n return redirect_handler_factory(\n redirects,\n REDIRECT_CODE_DEFAULT,\n '/status',\n '/reload',\n htmls('')\n )\n\n\ndef shutdown_server_thread(redirect_server: RedirectServer, sleep: float = 4):\n\n # thread target\n def shutdown_do(redirect_server_, sleep_):\n time.sleep(sleep_)\n redirect_server_.shutdown()\n\n st = threading.Thread(\n name='pytest-shutdown_thread',\n target=shutdown_do,\n args=(redirect_server, sleep))\n st.start()\n return st\n\n\n# XXX: crude way to pass object from a thread back to main thread\nRequest_Thread_Return = None\n\nreq_count = 0\n\n\ndef request_thread(ip: str, port: int, url: str, method: str, wait: float):\n \"\"\"caller should `.join` on thread\"\"\"\n\n # thread target\n def request_do(ip_: str, port_: int, url_: str, method_: str, wait_: float):\n time.sleep(wait_)\n cl = client.HTTPConnection(ip_, port=port_, timeout=1)\n cl.request(method_, url_)\n global Request_Thread_Return\n Request_Thread_Return = cl.getresponse()\n\n global req_count\n req_count += 1\n rt = threading.Thread(\n name='pytest-request_thread-%d' % req_count,\n target=request_do,\n args=(ip, port, url, method, wait))\n rt.start()\n return rt\n\n\nclass Test_ClassesComplex(object):\n\n def test_RedirectServer_server_activate(self):\n with RedirectServer((IP, port()), new_redirect_handler(ENTRY_LIST)) as redirect_server:\n redirect_server.server_activate()\n\n @pytest.mark.timeout(5)\n def test_RedirectServer_serve_forever(self):\n with RedirectServer((IP, port()), new_redirect_handler(ENTRY_LIST)) as redirect_server:\n _ = shutdown_server_thread(redirect_server, 1)\n redirect_server.serve_forever(poll_interval=0.3) # blocks\n\n\nclass Test_LiveServer(object):\n \"\"\"run the entire server which will bind to a real IP + Port\"\"\"\n\n F302 = int(http.HTTPStatus.FOUND) # 302\n NF404 = int(http.HTTPStatus.NOT_FOUND) # 404\n R308 = int(REDIRECT_CODE_DEFAULT) # 308\n ERR501 = int(http.HTTPStatus.NOT_IMPLEMENTED) # 501\n\n URL = 'http://' + IP\n\n rd = {'/a': Re_Entry('/a', 'A',)}\n\n @pytest.mark.parametrize(\n 'ip, url, method, redirects, loe, hi, header',\n (\n #\n # broad checks\n #\n pytest.param(IP, URL, 'GET', {}, 200, 499, None, id='broad check GET empty'),\n pytest.param(IP, URL, 'HEAD', {}, 200, 499, None, id='broad check HEAD empty'),\n pytest.param(IP, URL + '/X', 'GET', rd, 200, 499, None, id='broad check /X GET'),\n pytest.param(IP, URL + '/X', 'HEAD', rd, 200, 499, None, id='broad check /X HEAD'),\n #\n # precise checks 
- typical use-cases\n #\n pytest.param(IP, URL + '/X', 'GET', rd, NF404, None, ('Location', None), id='GET Not Found'),\n pytest.param(IP, URL + '/X', 'HEAD', rd, NF404, None, ('Location', None), id='HEAD Not Found'),\n # the two happy-path Redirect Found cases\n pytest.param(IP, URL + '/a', 'GET', rd, R308, None, ('Location', 'A'), id='GET Found'),\n pytest.param(IP, URL + '/a', 'HEAD', rd, R308, None, ('Location', 'A'), id='HEAD Found'),\n # make sure empty and None redirects is handled\n pytest.param(IP, URL + '/a', 'GET', {}, NF404, None, ('Location', None), id='/a GET empty'),\n pytest.param(IP, URL + '/a', 'HEAD', {}, NF404, None, ('Location', None), id='/a HEAD empty'),\n #\n # make sure other HTTP methods do nothing\n #\n pytest.param(IP, URL, 'POST', {}, ERR501, None, None, id='POST empty'),\n pytest.param(IP, URL, 'PUT', {}, ERR501, None, None, id='PUT empty'),\n pytest.param(IP, URL, 'DELETE', {}, ERR501, None, None, id='DELETE empty'),\n pytest.param(IP, URL, 'OPTIONS', {}, ERR501, None, None, id='OPTIONS empty'),\n pytest.param(IP, URL, 'TRACE', {}, ERR501, None, None, id='TRACE empty'),\n pytest.param(IP, URL, 'PATCH', {}, ERR501, None, None, id='PATCH empty'),\n pytest.param(IP, URL + '/a', 'POST', rd, ERR501, None, None, id='POST /a'),\n pytest.param(IP, URL + '/a', 'PUT', rd, ERR501, None, None, id='PUT /a'),\n pytest.param(IP, URL + '/a', 'DELETE', rd, ERR501, None, None, id='DELETE /a'),\n pytest.param(IP, URL + '/a', 'OPTIONS', rd, ERR501, None, None, id='OPTIONS /a'),\n pytest.param(IP, URL + '/a', 'TRACE', rd, ERR501, None, None, id='TRACE /a'),\n pytest.param(IP, URL + '/a', 'PATCH', rd, ERR501, None, None, id='PATCH /a'),\n pytest.param(IP, URL + '/', 'POST', rd, ERR501, None, None, id='POST /'),\n pytest.param(IP, URL + '/.', 'POST', rd, ERR501, None, None, id='POST /.'),\n )\n )\n @pytest.mark.timeout(4)\n def test_requests(self,\n ip: str,\n url: str,\n method: str,\n redirects: typing.Optional[Re_Entry_Dict],\n loe: int, # low bound or equal (assertion)\n hi: typing.Optional[int], # high bound or None (assertion)\n header: typing.Optional[typing.Tuple[str, str]] # assertion\n ):\n port_ = port()\n with RedirectServer((ip, port_), new_redirect_handler(redirects)) as redirect_server:\n # XXX: crude synchronizations. Good enough for this test harness!\n wait = 0.5\n srv_uptime = wait + 0.5\n thr_wait = wait\n shutdown_server_thread(redirect_server, srv_uptime)\n rt = request_thread(ip, port_, url, method, wait)\n redirect_server.serve_forever(poll_interval=0.2) # blocks for srv_uptime until server is shutdown\n rt.join(thr_wait) # blocks for thr_wait until thread ends\n\n # assertions\n assert not rt.is_alive(), 'thread did not end within %s seconds' % thr_wait\n global Request_Thread_Return\n assert Request_Thread_Return is not None, 'the thread did not set the global Request_Thread_Return; unlucky time synch? 
did the thread crash?'\n rr = Request_Thread_Return\n Request_Thread_Return = None\n if hi is None and loe:\n assert loe == rr.code\n elif hi and loe:\n assert loe <= rr.code <= hi, \"ip=(%s) url=(%s) method=(%s)\" % (ip, url, method)\n if header:\n assert rr.getheader(header[0]) == header[1], \"getheaders: %s\" % rr.getheaders()\n", "sub_path": "goto_http_redirect_server/test/test_goto_http_redirect_server.py", "file_name": "test_goto_http_redirect_server.py", "file_ext": "py", "file_size_in_byte": 31268, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "typing.Optional", "line_number": 45, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server", "line_number": 49, "usage_type": "attribute"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server", "line_number": 50, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "name"}, {"api_name": "getpass.getuser", "line_number": 55, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.to_ParseResult", "line_number": 58, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_EntryType", "line_number": 59, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 68, "usage_type": "call"}, {"api_name": "urllib.parse.ParseResult", "line_number": 69, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 154, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 155, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 157, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 82, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 87, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 90, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 91, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 93, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 94, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 95, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 96, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 97, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 98, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 99, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 100, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 102, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 103, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 104, "usage_type": "call"}, {"api_name": 
"goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 105, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 106, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 107, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 108, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 109, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 110, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 111, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 113, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 114, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 115, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 116, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 117, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 118, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 119, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 120, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 121, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 122, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 123, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 124, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 125, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 126, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 127, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 128, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 129, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 130, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 131, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 132, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 133, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 134, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 135, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 136, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 137, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 138, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 139, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 140, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 141, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 142, "usage_type": 
"call"}, {"api_name": "pytest.param", "line_number": 144, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 145, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.htmls", "line_number": 173, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.html_escape", "line_number": 174, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 163, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 166, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.htmls", "line_number": 166, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 167, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.htmls", "line_number": 167, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 168, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.htmls", "line_number": 168, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 169, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.htmls", "line_number": 169, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 170, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.htmls", "line_number": 170, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.html_a", "line_number": 193, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 178, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 178, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 181, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 182, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 183, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 184, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 185, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 186, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 209, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.fromisoformat", "line_number": 210, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 196, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 196, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 202, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 202, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 203, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 203, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 204, "usage_type": "call"}, {"api_name": "urllib.parse.ParseResult", "line_number": 223, "usage_type": "name"}, {"api_name": "urllib.parse.ParseResult", "line_number": 224, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectHandler.query_match", "line_number": 226, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectHandler", "line_number": 226, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 213, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 213, 
"usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 216, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 217, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 218, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 219, "usage_type": "call"}, {"api_name": "urllib.parse.ParseResult", "line_number": 348, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry_Dict", "line_number": 349, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 350, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectHandler.query_match_finder", "line_number": 351, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectHandler", "line_number": 351, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 228, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 228, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 233, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 235, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 236, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 238, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 240, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 243, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 245, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 246, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 248, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 250, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 251, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 253, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 255, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 256, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 258, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 260, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 261, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 263, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 265, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 268, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 270, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 271, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 273, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 275, "usage_type": "call"}, {"api_name": 
"goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 276, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 278, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 280, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 283, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 285, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 286, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 288, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 290, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 293, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 295, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 296, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 298, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 300, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 301, "usage_type": "call"}, {"api_name": "urllib.parse.ParseResult", "line_number": 510, "usage_type": "name"}, {"api_name": "urllib.parse.ParseResult", "line_number": 511, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectHandler.combine_parseresult", "line_number": 513, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectHandler", "line_number": 513, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 355, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 355, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 364, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 370, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 376, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 382, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 388, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 394, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 401, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 407, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 413, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 419, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 426, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 432, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 438, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 444, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 450, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 456, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 462, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 471, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 477, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 483, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 489, "usage_type": "call"}, {"api_name": 
"pytest.param", "line_number": 495, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 501, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.print_debug", "line_number": 529, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 516, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 516, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 519, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 520, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 521, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 522, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 523, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.html_a", "line_number": 546, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 531, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 531, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 534, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 535, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 536, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 537, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 538, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 539, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.FromTo_List", "line_number": 559, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry_Dict", "line_number": 560, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectsLoader.load_redirects_fromto", "line_number": 561, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectsLoader", "line_number": 561, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 549, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 549, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 552, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 553, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry_Dict", "line_number": 590, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry_Dict", "line_number": 591, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectsLoader.clean_redirects", "line_number": 592, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectsLoader", "line_number": 592, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 564, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 564, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 568, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 569, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 570, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 573, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.REDIRECT_PATHS_NOT_ALLOWED", "line_number": 574, "usage_type": 
"name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 574, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 578, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 579, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 583, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 584, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 585, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry_Dict", "line_number": 617, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.redirect_handler_factory", "line_number": 619, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.REDIRECT_CODE_DEFAULT", "line_number": 621, "usage_type": "argument"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.htmls", "line_number": 624, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectHandler", "line_number": 618, "usage_type": "name"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectServer", "line_number": 628, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 632, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 635, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 654, "usage_type": "call"}, {"api_name": "http.client.HTTPConnection", "line_number": 655, "usage_type": "call"}, {"api_name": "http.client", "line_number": 655, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 662, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectServer", "line_number": 673, "usage_type": "call"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectServer", "line_number": 678, "usage_type": "call"}, {"api_name": "pytest.mark.timeout", "line_number": 676, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 676, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 686, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 687, "usage_type": "attribute"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.REDIRECT_CODE_DEFAULT", "line_number": 688, "usage_type": "argument"}, {"api_name": "http.HTTPStatus", "line_number": 689, "usage_type": "attribute"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry", "line_number": 693, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 740, "usage_type": "attribute"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.Re_Entry_Dict", "line_number": 740, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 742, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 743, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 743, "usage_type": "attribute"}, {"api_name": "goto_http_redirect_server.goto_http_redirect_server.RedirectServer", "line_number": 746, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 695, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 695, "usage_type": "attribute"}, {"api_name": "pytest.param", "line_number": 701, "usage_type": "call"}, 
{"api_name": "pytest.param", "line_number": 702, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 703, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 704, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 708, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 709, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 711, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 712, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 714, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 715, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 719, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 720, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 721, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 722, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 723, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 724, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 725, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 726, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 727, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 728, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 729, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 730, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 731, "usage_type": "call"}, {"api_name": "pytest.param", "line_number": 732, "usage_type": "call"}, {"api_name": "pytest.mark.timeout", "line_number": 735, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 735, "usage_type": "attribute"}]} +{"seq_id": "436765993", "text": "\"\"\"\nThis module provides signals, which are a simple dispatching system that allows any number of interested parties\nto subscribe to events (\"signals\").\n\nThis is similar to the Blinker library (https://pypi.org/project/blinker/), with the following changes:\n - provides only a small subset of Blinker's functionality\n - supports type hints\n - supports async receivers.\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport inspect\nimport weakref\nfrom collections.abc import Awaitable\nfrom collections.abc import Callable\nfrom typing import Any\nfrom typing import cast\nfrom typing import Generic\nfrom typing import ParamSpec\nfrom typing import TypeVar\n\n\nP = ParamSpec(\"P\")\nR = TypeVar(\"R\")\n\n\ndef make_weak_ref(obj: Any) -> weakref.ReferenceType:\n \"\"\"\n Like weakref.ref(), but using weakref.WeakMethod for bound methods.\n \"\"\"\n if hasattr(obj, \"__self__\"):\n return cast(weakref.ref, weakref.WeakMethod(obj))\n else:\n return weakref.ref(obj)\n\n\n# We're running into https://github.com/python/mypy/issues/6073 here,\n# which is why the base class is a mixin and not a generic superclass.\nclass _SignalMixin:\n def __init__(self) -> None:\n self.receivers: list[weakref.ref[Callable]] = []\n\n def connect(self, receiver: Callable) -> None:\n \"\"\"\n Register a signal receiver.\n\n The signal will only hold a weak reference to the receiver function.\n \"\"\"\n receiver = make_weak_ref(receiver)\n self.receivers.append(receiver)\n\n def disconnect(self, receiver: Callable) -> None:\n self.receivers = [r for r in self.receivers if r() != receiver]\n\n def notify(self, *args, **kwargs):\n cleanup = False\n for ref in self.receivers:\n r = 
ref()\n if r is not None:\n yield r(*args, **kwargs)\n else:\n cleanup = True\n if cleanup:\n self.receivers = [r for r in self.receivers if r() is not None]\n\n\nclass _SyncSignal(Generic[P], _SignalMixin):\n def connect(self, receiver: Callable[P, None]) -> None:\n assert not asyncio.iscoroutinefunction(receiver)\n super().connect(receiver)\n\n def disconnect(self, receiver: Callable[P, None]) -> None:\n super().disconnect(receiver)\n\n def send(self, *args: P.args, **kwargs: P.kwargs) -> None:\n for ret in super().notify(*args, **kwargs):\n assert ret is None or not inspect.isawaitable(ret)\n\n\nclass _AsyncSignal(Generic[P], _SignalMixin):\n def connect(self, receiver: Callable[P, Awaitable[None] | None]) -> None:\n super().connect(receiver)\n\n def disconnect(self, receiver: Callable[P, Awaitable[None] | None]) -> None:\n super().disconnect(receiver)\n\n async def send(self, *args: P.args, **kwargs: P.kwargs) -> None:\n await asyncio.gather(\n *[\n aws\n for aws in super().notify(*args, **kwargs)\n if aws is not None and inspect.isawaitable(aws)\n ]\n )\n\n\n# noinspection PyPep8Naming\ndef SyncSignal(receiver_spec: Callable[P, None]) -> _SyncSignal[P]:\n \"\"\"\n Create a synchronous signal with the given function signature for receivers.\n\n Example:\n\n s = SyncSignal(lambda event: None) # all receivers must accept a single \"event\" argument.\n def receiver(event):\n print(event)\n\n s.connect(receiver)\n s.send(\"foo\") # prints foo\n s.send(event=\"bar\") # prints bar\n\n def receiver2():\n ...\n\n s.connect(receiver2) # mypy complains about receiver2 not having the right signature\n\n s2 = SyncSignal(lambda: None) # this signal has no arguments\n s2.send()\n \"\"\"\n return cast(_SyncSignal[P], _SyncSignal())\n\n\n# noinspection PyPep8Naming\ndef AsyncSignal(receiver_spec: Callable[P, Awaitable[None] | None]) -> _AsyncSignal[P]:\n \"\"\"\n Create an signal that supports both regular and async receivers:\n\n Example:\n\n s = AsyncSignal(lambda event: None)\n async def receiver(event):\n print(event)\n s.connect(receiver)\n await s.send(\"foo\") # prints foo\n \"\"\"\n return cast(_AsyncSignal[P], _AsyncSignal())\n", "sub_path": "mitmproxy/utils/signals.py", "file_name": "signals.py", "file_ext": "py", "file_size_in_byte": 4189, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "typing.ParamSpec", "line_number": 24, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 33, "usage_type": "call"}, {"api_name": "weakref.ref", "line_number": 33, "usage_type": "attribute"}, {"api_name": "weakref.WeakMethod", "line_number": 33, "usage_type": "call"}, {"api_name": "weakref.ref", "line_number": 35, "usage_type": "call"}, {"api_name": "weakref.ReferenceType", "line_number": 28, "usage_type": "attribute"}, {"api_name": "weakref.ref", "line_number": 42, "usage_type": "attribute"}, {"api_name": "collections.abc.Callable", "line_number": 42, "usage_type": "name"}, {"api_name": "collections.abc.Callable", "line_number": 44, "usage_type": "name"}, {"api_name": "collections.abc.Callable", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Generic", "line_number": 68, "usage_type": "name"}, {"api_name": "collections.abc.Callable", "line_number": 69, "usage_type": "name"}, {"api_name": "asyncio.iscoroutinefunction", "line_number": 70, "usage_type": "call"}, 
{"api_name": "collections.abc.Callable", "line_number": 73, "usage_type": "name"}, {"api_name": "inspect.isawaitable", "line_number": 78, "usage_type": "call"}, {"api_name": "typing.Generic", "line_number": 81, "usage_type": "name"}, {"api_name": "collections.abc.Callable", "line_number": 82, "usage_type": "name"}, {"api_name": "collections.abc.Awaitable", "line_number": 82, "usage_type": "name"}, {"api_name": "collections.abc.Callable", "line_number": 85, "usage_type": "name"}, {"api_name": "collections.abc.Awaitable", "line_number": 85, "usage_type": "name"}, {"api_name": "asyncio.gather", "line_number": 89, "usage_type": "call"}, {"api_name": "inspect.isawaitable", "line_number": 93, "usage_type": "call"}, {"api_name": "collections.abc.Callable", "line_number": 99, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 121, "usage_type": "call"}, {"api_name": "collections.abc.Callable", "line_number": 125, "usage_type": "name"}, {"api_name": "collections.abc.Awaitable", "line_number": 125, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "622998500", "text": "import os\nfrom HorseRace.common import *\nfrom pathlib import Path\nfrom urllib import request\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pandas as pd\nfrom datetime import timedelta\nimport numpy as np\nfrom urllib import parse\nimport csv\n\ndef get_pages(url):\n\tres = request.urlopen(url)\n\tsoup = BeautifulSoup(res,\"html.parser\")\n\tres.close()\n\treturn soup\n\ndef get_tables(url):\n\tdata = pd.read_html(url)\n\treturn data\n\ndef get_driver(opt=None):\n\t# dp = r\"C:\\driver\\96\\chromedriver.exe\"\n\toptions = Options()\n\tif opt:\n\t\toptions.add_argument('--headless')\n\treturn webdriver.Chrome(ChromeDriverManager().install(), options=options)\n\t# return webdriver.Chrome(dp, options=options)\n\n\ndef set_login(d):\n\td.get(r\"https://regist.netkeiba.com/account/?pid=login\")\n\t# d.switch_to_window(d.window_handles[1])\n\tloginid = d.find_element_by_name(\"login_id\")\n\tpswd = d.find_element_by_name('pswd')\n\tlogbtn = d.find_element_by_class_name('loginBtn__wrap').find_element_by_tag_name('input')\n\tloginid.send_keys(os.environ['email'])\n\tpswd.send_keys(os.environ['passwd'])\n\tlogbtn.click()\n\treturn d\n\n\ndef categorize_races(rname):\n\tf = open(r\"D:\\horserace\\data\\race\\mst\\race.csv\",encoding='utf8')\n\tif not f.exists():\n\t\tf = open(r\"E:\\horserace\\data\\race\\mst\\race.csv\",encoding='utf8')\n\ttxt = csv.reader(f)\n\ttext = list(txt)\n\tG1,G2,G3=[],[],[]\n\tfor t in text:\n\t\tif t[0] == 'G1':\n\t\t\tG1.append(t[1])\n\t\telif t[0] == 'G2':\n\t\t\tG2.append(t[1])\n\t\telif t[0] == 'G3':\n\t\t\tG3.append(t[1])\n\tcategory = ''\n\tif 'G1' in rname or rname in G1:\n\t\tcategory = '01.G1'\n\telif 'G2' in rname or rname in G2:\n\t\tcategory = '02.G2'\n\telif 'G3' in rname or rname in G3:\n\t\tcategory = '03.G3'\n\telif 'OP' in rname or 'S' in rname or 'ステークス' in rname or '賞' in rname or '杯' in rname or '特別' in rname :\n\t\tcategory = '04.OP'\n\telif 'L' in rname:\n\t\tcategory = '05.L'\n\telif '1600万下' in rname:\n\t\tcategory = '06.1600万下'\n\telif '1000万下' in rname:\n\t\tcategory = '07.1000万下'\n\telif '500万下' in rname:\n\t\tcategory = '08.500万下'\n\telif '3勝クラス' in rname:\n\t\tcategory = '09.3勝クラス'\n\telif '2勝クラス' in rname:\n\t\tcategory = '10.2勝クラス'\n\telif '1勝クラス' in rname:\n\t\tcategory = 
'11.1勝クラス'\n\telif '2歳未勝利' in rname:\n\t\tcategory = '12.2歳未勝利'\n\telif '3歳未勝利' in rname:\n\t\tcategory = '13.3歳未勝利'\n\telif '3歳以上未勝利' in rname:\n\t\tcategory = '13.3歳未勝利'\n\telif '3歳新馬' in rname:\n\t\tcategory = '14.3歳新馬'\n\telif '2歳新馬' in rname:\n\t\tcategory = '15.2歳新馬'\n\telse:\n\t\tprint(rname, 'OP?')\n\t\tcategory = '04.OP'\n\tf.close()\n\treturn category\n\ndef categorize_races_bk(rname):\n\tcategory = ''\n\tif '(G1)' in rname:\n\t\tcategory = '01.G1'\n\telif '(G2)' in rname:\n\t\tcategory = '02.G2'\n\telif '(G3)' in rname:\n\t\tcategory = '03.G3'\n\telif '(OP)' in rname:\n\t\tcategory = '04.OP'\n\telif '(L)' in rname:\n\t\tcategory = '05.L'\n\telif '1600万下' in rname:\n\t\tcategory = '06.1600万下'\n\telif '1000万下' in rname:\n\t\tcategory = '07.1000万下'\n\telif '500万下' in rname:\n\t\tcategory = '08.500万下'\n\telif '3勝クラス' in rname:\n\t\tcategory = '09.3勝クラス'\n\telif '2勝クラス' in rname:\n\t\tcategory = '10.2勝クラス'\n\telif '1勝クラス' in rname:\n\t\tcategory = '11.1勝クラス'\n\telif '2歳未勝利' in rname:\n\t\tcategory = '12.2歳未勝利'\n\telif '3歳未勝利' in rname:\n\t\tcategory = '13.3歳未勝利'\n\telif '3歳新馬' in rname:\n\t\tcategory = '14.3歳新馬'\n\telif '2歳新馬' in rname:\n\t\tcategory = '15.2歳新馬'\n\treturn category\n\n\n\ndef split_distance(df):\n\tprint(type(df),df['距離'])\n\tdtype = ''\n\tif isinstance(df['距離'], str):\n\t\tif df['距離'][0] == '芝':\n\t\t\tdtype = '芝'\n\t\telif df['距離'][0] == 'ダ':\n\t\t\tdtype = 'ダ'\n\t\telif df['距離'][0] == '障':\n\t\t\tdtype = '障'\n\telse:\n\t\tif df['距離'].str[0] == '芝':\n\t\t\tdtype = '芝'\n\t\telif df['距離'].str[0] == 'ダ':\n\t\t\tdtype = 'ダ'\n\t\telif df['距離'].str[0] == '障':\n\t\t\tdtype = '障'\n\t\telse:\n\t\t\tprint('不明', df['距離'])\n\t\t\treturn ''\n\tdf['距離'] = df['距離'].replace('芝','').replace('ダ','').replace('障','')\n\tdf['タイプ'] = dtype\n\treturn df\n\ndef convert_time(time):\n\tm = int(time[0])\n\ts = int(time[2:4])\n\tss = int(time[5])*100000\n\ttd = timedelta(minutes=m,seconds=s,microseconds=ss).total_seconds()\n\treturn td\n\ndef convert_time2(time):\n\tif type(time) is str:\n\t\tm = int(str(time[0]))\n\t\ts = int(str(time[2:4]))\n\t\tss = int(str(time[5]))*100000\n\t\ttd = timedelta(minutes=m,seconds=s,microseconds=ss).total_seconds()\n\telse:\n\t\tprint(type(time),time)\n\t\ttd = None\n\treturn td\n\ndef convert_time3(time):\n\tif type(time) is str:\n\t\t# m = int(str(time[0]))\n\t\ts = int(str(time[0:2]))\n\t\tss = int(str(time[3:4]))*100000\n\t\ttd = timedelta(seconds=s,microseconds=ss).total_seconds()\n\telse:\n\t\tprint(type(time),time)\n\t\ttd = None\n\treturn td\n\ndef get_all_horse_data():\n\thp = Path(r\"D:\\horserace\\data\\horse\")\n\tif not hp.exists():\n\t\thp = Path(r\"E:\\horserace\\data\\horse\")\n\thds = hp.glob('*.csv')\n\tfor i,hd in enumerate(hds):\n\t\thdd = pd.read_csv(hd)\n\t\thdd['hid'] = hd.name[:10]\n\t\tif i == 0:\n\t\t\thdd2 = hdd.copy()\n\t\telse:\n\t\t\thdd2 = hdd2.append(hdd)\n\n\n\t# Index(['Unnamed: 0', '日付', '開催', '天気', 'レース名', '距離', '頭数', '枠番', '馬番', '人気',\n # '着順', '着差', '騎手', '斤量', 'タイム', '通過', 'ペース', '上り', '馬体重', '勝馬', 'hid',\n # 'Unnamed: 0.1'],\n # dtype='object')\n\thdd2 = hdd2.dropna(how='any')\n\thdd2['開催'] = hdd2['開催'].replace('[0-9]','',regex=True)\n\thdd2['グレード'] = hdd2['レース名'].apply(categorize_races)\n\thdd2 = hdd2.apply(lambda x: split_distance(x), axis=1)\n\t# 距離 intに\n\thdd2['time2'] = hdd2['タイム'].apply(convert_time)\n\treturn hdd2\n\ndef select_rid(url):\n\tpu = parse.urlparse(url)\n\tridd = pu.query.replace('race_id=','')[:12]\n\tprint(ridd)\n\treturn ridd\n\ndef select_racename(r):\n lpos = r.find('R ')\n rpos = r.find(' 結果')\n 
return r[lpos+3:rpos]\n\ndef select_hid(url):\n\tpu = parse.urlparse(url)\n\thidd = pu.query.replace('id=','')[:10]\n\tprint(hidd)\n\treturn hidd\n", "sub_path": "scraping/HorseRace/HorseRace/common/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 6110, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "urllib.request.urlopen", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 16, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 22, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 27, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 30, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 30, "usage_type": "name"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 30, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 41, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 163, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 171, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 182, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 189, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 191, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 194, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 215, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 215, "usage_type": "name"}, {"api_name": "urllib.parse.urlparse", "line_number": 226, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 226, "usage_type": "name"}]} +{"seq_id": "582283132", "text": "from pymongo import MongoClient\nimport pandas as pd\nfrom sklearn.metrics.pairwise import euclidean_distances\nimport gc\n\n#Artists = db.music_artists\n#Albums = db.music_albums\n\n\ndef get_data_track(query, start, end):\n client = MongoClient('mongodb://localhost:27017')\n db = client.spotify\n \n Tracks = db.music_tracks\n Albums = db.music_albums\n\n cursor = Tracks.find(query, {'_id': False}).skip(start).limit(end)\n list_cur = list(cursor)\n for track in list_cur:\n try:\n track['album'] = Albums.find_one({'id': track['album_id']}, {'_id': False})\n except:\n track['album'] = None\n client.close()\n return list_cur\n\ndef get_data_album(query, start, end):\n client = MongoClient('mongodb://localhost:27017')\n db = client.spotify\n\n Albums = db.music_albums\n\n cursor = Albums.find(query, {'_id': False}).skip(start).limit(end)\n list_cur = list(cursor)\n client.close()\n return list_cur\n\ndef make_audio_feature(data):\n client = MongoClient('mongodb://localhost:27017')\n db = client.spotify\n\n Tracks = db.music_tracks\n AF = db.tracks_af_clustered\n cluster_features = [\n 'acousticness',\n 'danceability',\n 'energy',\n 'valence'\n ]\n\n ctemp = [{\"valence\": 0,\"energy\": 0,\"danceability\": 0,\"acousticness\": 0,}, \n {\"valence\": 0,\"energy\": 0,\"danceability\": 0,\"acousticness\": 0,}, \n {\"valence\": 0,\"energy\": 0,\"danceability\": 0,\"acousticness\": 0,}, \n {\"valence\": 0,\"energy\": 0,\"danceability\": 0,\"acousticness\": 0,}, \n 
{\"valence\": 0,\"energy\": 0,\"danceability\": 0,\"acousticness\": 0,}]\n clen = [0, 0, 0, 0, 0]\n for track in data:\n af = AF.find_one({'id': track['songid']}, {'_id': False})\n \n ctemp[af['cluster']]['valence'] += af['valence']\n ctemp[af['cluster']]['energy'] += af['energy']\n ctemp[af['cluster']]['danceability'] += af['danceability']\n ctemp[af['cluster']]['acousticness'] += af['acousticness']\n clen[af['cluster']]+=1\n\n result = []\n\n for i in range(0, 5):\n if clen[i] == 0:\n continue\n ctemp[i]['valence'] = ctemp[i]['valence']/clen[i]\n ctemp[i]['energy'] = ctemp[i]['energy']/clen[i]\n ctemp[i]['danceability'] = ctemp[i]['danceability']/clen[i]\n ctemp[i]['acousticness'] = ctemp[i]['acousticness']/clen[i]\n \n audio_feature_df = pd.DataFrame(columns=[\n \"valence\",\n \"energy\",\n \"danceability\",\n \"acousticness\"\n ])\n \n\n audio_feature = AF.find({'cluster': i}, {'_id': False})\n \n #idx = 1\n #for af in audio_feature:\n # data_info = pd.DataFrame({\n # \"valence\": af['valence'],\n # \"energy\": af['energy'],\n # \"danceability\": af['danceability'],\n # \"acousticness\": af['acousticness'],\n # }, index=[idx])\n # idx+=1\n # audio_feature_df = audio_feature_df.append(data_info, sort=True)\n #audio_feature_df = audio_feature_df.append(pd.DataFrame(ctemp[i], index=[0]), sort=True)\n\n afList = list(audio_feature)\n #afList.insert(0, ctemp[i])\n audio_feature_df = pd.DataFrame(afList)\n audio_feature_df = audio_feature_df[cluster_features]\n audio_feature_df.loc[:,['valence']] = audio_feature_df.loc[:,['valence']].astype('float32')\n audio_feature_df.loc[:,['energy']] = audio_feature_df.loc[:,['energy']].astype('float32')\n audio_feature_df.loc[:,['danceability']] = audio_feature_df.loc[:,['danceability']].astype('float32')\n audio_feature_df.loc[:,['acousticness']] = audio_feature_df.loc[:,['acousticness']].astype('float32')\n \n te = [ctemp[i]['acousticness'],ctemp[i]['danceability'],ctemp[i]['energy'],ctemp[i]['valence']]\n dists = euclidean_distances(audio_feature_df, pd.DataFrame(ctemp[i], index=[0]))\n \n six = [1, 1, 1, 1, 1, 1]\n li = []\n for dis in dists:\n for j in range(0, 6):\n if six[j] > dis[0]:\n six[j] = dis[0]\n break\n li.append(dis[0])\n \n re = []\n for val in six:\n re.append(afList[li.index(val)]['id'])\n\n result.append({'cluster' : i, 'data' : re})\n del audio_feature_df\n del dists\n gc.collect()\n \n client.close()\n return result\n", "sub_path": "backend/ReMu/connectdb.py", "file_name": "connectdb.py", "file_ext": "py", "file_size_in_byte": 4447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pymongo.MongoClient", "line_number": 11, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 28, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 76, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.euclidean_distances", "line_number": 108, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "11123434", "text": "##Chris Barker\n#!/usr/bin/env python\n\n\"\"\"\nA simple test of the GridBagSizer\n\nhttp://wiki.wxpython.org/index.cgi/WriteItYourself\n\n\"\"\"\n\nimport wx\n\nclass MyFrame(wx.Frame):\n def __init__(self, parent, ID, title):\n 
wx.Frame.__init__(self, parent, ID, title, wx.DefaultPosition)\n\n Buttons = []\n for i in range(6):\n Buttons.append(wx.Button(self,-1, \"Button %i\"%(i)))\n\n sizer = wx.GridBagSizer(9, 9)\n sizer.Add(Buttons[0], (0, 0), wx.DefaultSpan, wx.ALL, 5)\n sizer.Add(Buttons[1], (1, 1), (1,7), wx.EXPAND)\n sizer.Add(Buttons[2], (6, 6), (3,3), wx.EXPAND)\n sizer.Add(Buttons[3], (3, 0), (1,1), wx.ALIGN_CENTER)\n sizer.Add(Buttons[4], (4, 0), (1,1), wx.ALIGN_LEFT)\n sizer.Add(Buttons[5], (5, 0), (1,1), wx.ALIGN_RIGHT)\n\n sizer.AddGrowableRow(6)\n sizer.AddGrowableCol(6)\n\n self.SetSizerAndFit(sizer)\n self.Centre()\n\n\nclass MyApp(wx.App):\n def OnInit(self):\n frame = MyFrame(None, -1, \"wx.gridbagsizer.py\")\n frame.Show(True)\n self.SetTopWindow(frame)\n return True\n\nif __name__ == \"__main__\":\n app = MyApp(0)\n app.MainLoop()\n\n", "sub_path": "docs/sphinx/rest_substitutions/snippets/python/contrib/GridBagSizer.1.py", "file_name": "GridBagSizer.1.py", "file_ext": "py", "file_size_in_byte": 1166, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "wx.Frame", "line_number": 13, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 15, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 19, "usage_type": "call"}, {"api_name": "wx.GridBagSizer", "line_number": 21, "usage_type": "call"}, {"api_name": "wx.DefaultSpan", "line_number": 22, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 22, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 23, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 24, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 25, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_LEFT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_RIGHT", "line_number": 27, "usage_type": "attribute"}, {"api_name": "wx.App", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "254176715", "text": "#!/usr/bin/env python\nfrom PIL import Image\nimport numpy as np\nimport sys\nimport os\nimport subprocess\n\n\ndef image2dat(img_name,ndisk,radius,mask_name=None,factor=100):\n ndisk=int(ndisk)\n radius=int(radius)\n if ( factor != '100' ):\n cmd='convert -resize '+str(factor)+'% '+ str(img_name)+' img_temp'\n subprocess.call(cmd,shell=True)\n img_name='img_temp'\n\n #open file in fileList:\n img_file = Image.open(img_name)\n\n\n # get original image parameters...\n height, width = img_file.size\n\n \n data_mask=np.zeros([width,height])\n data_mask[:,:]=255\n\n check = 2\n \n for i in range(ndisk) :\n x_center=int(np.random.rand()*width)\n y_center=int(np.random.rand()*height)\n \n for ip in range( x_center - radius - check, x_center + radius + check):\n for jp in range( y_center - radius - check, y_center + radius + check) :\n if ( ip 1:\n image2dat(*sys.argv[1:])\n else:\n raise SystemExit(\"usage: python image2dat image data [grid]\")\n", "sub_path": "python_scripts/network_impainting/makemask.py", "file_name": "makemask.py", "file_ext": "py", "file_size_in_byte": 1317, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "subprocess.call", "line_number": 14, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 18, "usage_type": "call"}, 
{"api_name": "PIL.Image", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 32, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 41, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 47, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 48, "usage_type": "attribute"}]} +{"seq_id": "547749855", "text": "#Author: Nick Lyubenko\n\nfrom tkinter import *\nimport tkinter.messagebox as tm\nimport socket\nfrom tkinter import ttk\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.ttk import *\nfrom threading import Thread\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom datetime import datetime\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass MainApplication:\n\n def __init__(self, master):\n\n def Nike(info, driver, bool):\n\n wait = WebDriverWait(driver, 10)\n\n currentProcess = StringVar()\n\n try:\n driver.get(\"http://store.nike.com/us/en_us/?l=shop,login\")\n except Exception as e:\n currentProcess.set('Cannot connect to login page')\n print(e, 'Cannot connect to login page')\n\n currentProcess.set(\"Login page accessed\")\n try:\n driver.find_element_by_name(\"emailAddress\").send_keys(info['user'])\n driver.find_element_by_name('password').send_keys(info['pass'])\n driver.find_element_by_xpath('//input').click()\n except Exception as e:\n currentProcess.set('Cannot login to web page, check login info')\n print(e)\n\n currentProcess.set(\"Successfully logged in\")\n try:\n driver.get(str(info['link']))\n except Exception as e:\n currentProcess.set('Cannot access link provided')\n print(e)\n\n currentProcess.set(\"Shoe page accessed\")\n\n # opening size dropdown\n size_button = driver.find_element_by_xpath('//*[@id=\"exp-pdp-buying-tools-container\"]/form/div[1]/div[1]/a/span[2]')\n actions = ActionChains(driver)\n actions.move_to_element(size_button).click().perform()\n\n # selecting size\n size = driver.find_element_by_xpath(\n \"//li[contains(@class, 'nsg-form--drop-down--option') and normalize-space(.) 
= '\" + str(\n info['size']) + \"']\")\n actions = ActionChains(driver)\n actions.move_to_element(size).click().perform()\n\n currentProcess.set(\"Shoe size selected\")\n\n try:\n driver.find_element_by_id(\"buyingtools-add-to-cart-button\").click()\n driver.get(str(info['link']))\n except Exception as e:\n print(e)\n currentProcess.set('Shoe cannot be added to the cart, processes will reiterate')\n Nike(info, driver, bool)\n\n currentProcess.set(\"Shoe added to cart\")\n\n if bool:\n try:\n checkout_button = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, \".checkout-button\")))\n actions = ActionChains(driver)\n actions.move_to_element(checkout_button).click().perform()\n except Exception as e:\n print(e)\n\n def Adidas(info, driver, bool):\n wait = WebDriverWait(driver, 10)\n currentProcess = StringVar()\n\n adidasSizes = {'6.5': 2, '7': 3, '7.5': 4, '8': 5, '8.5': 6, '9': 7, '9.5': 8,\n '10': 9, '10.5': 10, '11': 11, '11.5': 12, '12': 13, '12.5': 14, '13': 15, '14': 16,\n '15': 17}\n\n try:\n driver.get(\"https://www.adidas.com/us/myaccount-create-or-login\")\n except Exception as e:\n currentProcess.set('Cannot connect to login page')\n print(e, 'Cannot connect to login page')\n\n currentProcess.set(\"Login page accessed\")\n\n try:\n driver.switch_to.frame(driver.find_element_by_xpath('//*[(@id = \"loginaccountframe\")]'))\n driver.find_element_by_id('username').send_keys(info['user'])\n driver.find_element_by_id('password').send_keys(info['pass'])\n driver.find_element_by_xpath('//*[@id=\"signinSubmit\"]').click()\n except Exception as e:\n print(e)\n currentProcess.set(\"Cannot login to web page, check login info\")\n\n currentProcess.set('Successfully logged in')\n\n driver.switch_to_default_content()\n driver.get(info['link'])\n\n # opening size dropdown\n size_button = driver.find_element_by_xpath(\n '//*[@id=\"buy-block\"]/div[1]/div[5]/div[3]/form/div[8]/div/div[1]/button')\n actions = ActionChains(driver)\n actions.move_to_element(size_button).click().perform()\n\n # selecting size\n size = driver.find_element_by_xpath(\n '//*[@id=\"buy-block\"]/div[1]/div[5]/div[3]/form/div[2]/div[2]/div/div/div/div[2]/div/ul/li[' +\n str(adidasSizes[info['size']]) + ']')\n\n actions = ActionChains(driver)\n actions.move_to_element(size).click().perform()\n\n try:\n driver.find_element_by_name(\"add-to-cart-button\").click()\n except Exception as e:\n print(e)\n currentProcess.set('Cannot add item to cart, process will reiterate')\n Adidas(info, driver, bool)\n\n if bool:\n try:\n driver.get('https://www.adidas.com/us/checkout-start')\n except Exception as e:\n print(e)\n\n def Champs(info, driver, bool):\n\n currentProcess = StringVar()\n wait = WebDriverWait(driver, 10)\n driver.implicitly_wait(10)\n try:\n driver.get(\"https://m.champssports.com/?uri=account\")\n except Exception as e:\n print(e)\n currentProcess.set('Cannot access login page')\n\n currentProcess.set('Successfully accessed login page')\n try:\n driver.find_element_by_xpath('//*[(@id = \"email\")]').send_keys(info['user'])\n driver.find_element_by_xpath('//*[@id=\"password\"]').send_keys(info['pass'])\n except Exception as e:\n print(e)\n currentProcess.set('Cannot login using credentials provided')\n\n currentProcess.set('Successfully logged in')\n\n size = '0'\n if (float(info['size']) < 10):\n size += str(info['size'])\n else:\n size = str(info['size'])\n try:\n int(size)\n size+='.0'\n except ValueError:\n x = 0\n\n try:\n driver.get(info['link'] + size)\n\n 
driver.switch_to.frame(driver.find_element_by_xpath('//*[@id=\"email_form_overlay\"]/iframe'))\n\n driver.find_element_by_xpath('//*[@id=\"overlay_header_close_button\"]').click()\n driver.switch_to_default_content()\n except Exception as e:\n print(e)\n currentProcess.set('Error in accessing link provided')\n try:\n size = wait.until(EC.visibility_of_element_located(\n (By.XPATH, '//*[@id=\"product_form\"]/div[1]/div[1]/div[3]/span[2]/div[1]/div[10]/div[2]/span/button')))\n actions = ActionChains(driver)\n actions.move_to_element(size).click().perform()\n except Exception as e:\n print(e)\n currentProcess.set('Cannot select shoe size, try another size')\n try:\n # adding to cart\n driver.find_element_by_xpath('//*[@id=\"product_form\"]/div[1]/div[1]/div[3]/span[2]/div[1]/div[10]/div[2]/span/button').click()\n except Exception as e:\n print(e)\n currentProcess.set('Cannot add item to cart, edit sizes or let program try again')\n if bool:\n try:\n checkout_button = wait.until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"flyin_container\"]/div/div[1]/a')))\n actions = ActionChains(driver)\n actions.move_to_element(checkout_button).click().perform()\n except Exception as e:\n print(e)\n\n def Footlocker(info, driver, bool):\n\n wait = WebDriverWait(driver, 10)\n driver.get(\"https://m.footlocker.com/?uri=account\")\n try:\n driver.find_element_by_name(\"email\").send_keys(info['user'])\n driver.find_element_by_name('password').send_keys(info['pass'])\n driver.find_element_by_xpath('//*[@id=\"account_access_log_in_button\"]').click()\n except Exception as e:\n print(e)\n\n size = '0'\n if (float(info['size']) < 10):\n size += str(info['size'])\n else:\n size = str(info['size'])\n try:\n int(size)\n size += '.0'\n except ValueError:\n x = 0\n try:\n driver.get(info['link'] + \"?cm=%3A%20QUICK%20VIEW%3A%20MORE%20INFO&size=\" + size)\n driver.find_element_by_name('pdp_addtocart').click()\n except Exception as e:\n print(e)\n if bool:\n try:\n driver.get('https://www.footlocker.com/checkout/?uri=checkout')\n except Exception as e:\n print(e)\n\n def Bodega(info, driver, bool):\n wait = WebDriverWait(driver, 10)\n driver.get(\"https://shop.bdgastore.com/account/login\")\n try:\n driver.find_element_by_xpath('//*[@id=\"account\"]/div[5]/a').click()\n except Exception as e:\n print(e)\n\n try:\n driver.find_element_by_id(\"customer_email\").send_keys(info['user'])\n driver.find_element_by_id('customer_password').send_keys(info['pass'])\n driver.find_element_by_xpath('//*[@id=\"customer_login\"]/div[4]/input').submit()\n except Exception as e:\n print(e)\n\n driver.get(info['link'])\n\n # opening size dropdown\n size_button = wait.until(EC.visibility_of_element_located(\n (By.XPATH, '//*[@id=\"swatches\"]/div[2]/ul/li[6]')))\n actions = ActionChains(driver)\n actions.move_to_element(size_button).click().perform()\n\n # adding to cart\n driver.find_element_by_xpath('//*[@id=\"add\"]').submit()\n\n if bool:\n try:\n driver.get('https://shop.bdgastore.com/cart')\n checkout_button = wait.until(\n EC.visibility_of_element_located((By.XPATH, '//*[@id=\"cart\"]/form/div[3]/input')))\n actions = ActionChains(driver)\n actions.move_to_element(checkout_button).click().perform()\n except Exception as e:\n print(e)\n\n def Eastbay(info, driver, bool):\n\n currentProcess = StringVar()\n driver.get(\"https://m.eastbay.com/?uri=account/editLogin\")\n driver.find_element_by_name(\"email\").send_keys(info['user'])\n driver.find_element_by_name('password').send_keys(info['pass'])\n 
driver.find_element_by_xpath('//*[@id=\"account_access_log_in_button\"]').click()\n\n size = '0'\n if (float(info['size']) < 10):\n size += str(info['size'])\n else:\n size = str(info['size'])\n try:\n int(size)\n size += '.0'\n except ValueError:\n x = 0\n\n try:\n driver.get(info['link']+'?size='+size)\n except Exception as e:\n print(e)\n currentProcess.set('Error in accessing link provided')\n\n try:\n # adding to cart\n driver.find_element_by_xpath('//*[@id=\"pdp_addtocart_button\"]').click()\n except Exception as e:\n print(e)\n currentProcess.set('Cannot add item to cart, edit sizes or let program try again')\n if bool:\n try:\n driver.get('https://www.eastbay.com/checkout/?uri=checkout')\n except Exception as e:\n print(e)\n\n def addAcc(click):\n tv.insert('', 'end', text=tkvar.get(), values=(userIn.get(), pwIn.get()))\n\n def selectAcc(click):\n curItem = tv.focus()\n dict = tv.item(curItem)\n print(dict)\n info['site'] = dict['text']\n info['user'] = dict['values'][0]\n info['pass'] = dict['values'][1]\n print(info)\n\n def importAcc(click):\n filename = askopenfilename()\n with open(filename) as f:\n for line in f:\n login = line.split(':')\n tv.insert('', 'end', text=login[0], values=(login[1], login[2]))\n\n def delAcc(click):\n selected_item = tv.selection()[0]\n tv.delete(selected_item)\n\n def addSh(click):\n tv2.insert('', 'end', text=tkvar2.get(), values=(linkIn.get(), size.get()))\n\n def selectShoe(click):\n curItem = tv2.focus()\n dict = tv2.item(curItem)\n print(dict)\n info['link'] = dict['values'][0]\n info['size'] = dict['values'][1]\n print(info)\n\n def deleteShoe(click):\n selected_item = tv2.selection()[0]\n tv2.delete(selected_item)\n\n def addTask(click):\n tv4.insert('', 'end', text=info['site'],\n values=(info['user'], info['pass'], info['link'], info['size'], info['proxy']))\n\n def masterFunction(task):\n foo = {'site': task['text'], 'user': task['values'][0], 'pass': task['values'][1],\n 'size': str(task['values'][3])\n , 'link': task['values'][2], 'proxy': task['values'][4]}\n\n functions_dict = {'Nike': Nike, 'Adidas': Adidas, 'Champs': Champs, 'Footlocker': Footlocker,\n 'Bodega': Bodega, 'Eastbay': Eastbay}\n\n PROXY = foo['proxy']\n\n wdriver = None\n print(self.var.get())\n if (self.var.get() == True):\n if (PROXY != ''):\n webdriver.DesiredCapabilities.CHROME['proxy'] = {\n \"httpProxy\": PROXY,\n \"ftpProxy\": PROXY,\n \"sslProxy\": PROXY,\n \"noProxy\": None,\n \"proxyType\": \"MANUAL\",\n \"autodetect\": False\n }\n wdriver = webdriver.Chrome()\n else:\n if (PROXY != ''):\n webdriver.DesiredCapabilities.PHANTOMJS['proxy'] = {\n \"httpProxy\": PROXY,\n \"ftpProxy\": PROXY,\n \"sslProxy\": PROXY,\n \"noProxy\": None,\n \"proxyType\": \"MANUAL\",\n \"autodetect\": False\n }\n wdriver = webdriver.PhantomJS()\n\n functions_dict[foo['site']](foo, wdriver, check.get())\n\n def startTasks(click):\n import time\n while(str(datetime.now())[10:16]!=timeValue.get() and timeValue.get()!=''):\n time.sleep(5)\n tasks = tv4.get_children()\n for item in tasks:\n taskInfo = tv4.item(item)\n thread = Thread(target=masterFunction, args=(taskInfo,))\n thread.start()\n # if clicked sleep thread until clicked again\n tv5.insert('', 'end', text=taskInfo['text'],\n values=(taskInfo['values'][0], 'Task Started')) #make it update\n\n def deleteTask(click):\n selected_item = tv4.selection()[0]\n tv4.delete(selected_item)\n\n def importProxy(click):\n filename = askopenfilename()\n with open(filename) as f:\n for line in f:\n tv3.insert('', 'end', text=line)\n\n def 
selectProxy(click):\n curItem = tv3.focus()\n selected = tv3.item(curItem) # item data for the focused row\n print(selected)\n info['proxy'] = selected['text'].replace(\"\\n\", \"\")\n print(info)\n\n info = {'site': '', 'user': '', 'pass': '', 'size': '', 'link': '', 'proxy': ''}\n\n n = ttk.Notebook(master)\n\n f1 = ttk.Frame(n) # accounts\n\n tv = Treeview(f1, columns=('user', 'pass'), selectmode=\"extended\")\n tv.heading('#0', text='Website')\n tv.column('#0', stretch=YES)\n tv.heading('#1', text='Username')\n tv.column('#1', stretch=YES)\n tv.heading('#2', text='Password')\n tv.column('#2', stretch=YES)\n tv.grid(row=0, column=0, columnspan=3, sticky=NSEW)\n\n tkvar = StringVar(f1)\n choices = {'Nike', 'Adidas', 'Champs', 'Footlocker', 'Bodega', 'Eastbay'}\n tkvar.set('Nike') # set the default option\n popupMenu = OptionMenu(f1, tkvar, 'Nike', *choices)\n Label(f1, text=\"Website\").grid(row=1, sticky=NSEW)\n popupMenu.grid(row=2, sticky=NSEW)\n\n user = Label(f1, text=\"Username\")\n user.grid(row=3, sticky=NSEW)\n userIn = Entry(f1)\n userIn.grid(row=4, sticky=NSEW)\n\n pw = Label(f1, text=\"Password\")\n pw.grid(row=5, sticky=NSEW)\n pwIn = Entry(f1)\n pwIn.grid(row=6, sticky=NSEW)\n\n add = Button(f1, text='Add Account')\n add.bind(\"<Button-1>\", addAcc)\n add.grid(row=7, sticky=NSEW)\n\n sel = Button(f1, text='Select Account')\n sel.bind(\"<Button-1>\", selectAcc)\n sel.grid(row=8, sticky=NSEW)\n\n impAcc = Button(f1, text='Import Accounts')\n impAcc.bind(\"<Button-1>\", importAcc)\n impAcc.grid(row=9, sticky=NSEW)\n\n deleteAcc = Button(f1, text='Delete Account')\n deleteAcc.bind(\"<Button-1>\", delAcc)\n deleteAcc.grid(row=10, sticky=NSEW)\n\n f2 = ttk.Frame(n)\n\n self.var = BooleanVar()\n c = Checkbutton(f2, text=\"Show Browser\", variable=self.var)\n c.grid(column=0, row=1)\n print(self.var.get())\n\n check = BooleanVar()\n c = Checkbutton(f2, text=\"Auto-Checkout\", variable=check)\n c.grid(column=0, row=2)\n print(check.get())\n\n def timeLimit(*args):\n value = timeValue.get()\n if len(value) > 5: timeValue.set(value[:5])\n\n timeValue = StringVar()\n timeValue.trace('w', timeLimit)\n\n time = Label(f2,text='Release Time (hh:mm)')\n time.grid(column=0,row=3)\n timeEntry = Entry(f2, textvariable=timeValue)\n timeEntry.grid(column=2,row=3)\n\n f4 = ttk.Frame(n)\n\n tv2 = Treeview(f4, columns=('link', 'size'), selectmode=\"extended\")\n tv2.heading('#0', text='Website')\n tv2.column('#0', stretch=YES)\n tv2.heading('#1', text='Link')\n tv2.column('#1', stretch=YES)\n tv2.heading('#2', text='Size')\n tv2.column('#2', stretch=YES)\n tv2.grid(row=0, column=0, columnspan=3, sticky=NSEW)\n\n tkvar2 = StringVar(f4)\n tkvar2.set('Nike')\n popupMenu2 = OptionMenu(f4, tkvar2, 'Nike', *choices)\n Label(f4, text=\"Website\").grid(row=1, sticky=NSEW)\n popupMenu2.grid(row=2, sticky=NSEW)\n\n link = Label(f4, text=\"Early Link\")\n link.grid(row=3, sticky=NSEW)\n linkIn = Entry(f4)\n linkIn.grid(row=4, sticky=NSEW)\n\n size = StringVar(f4)\n sizes = {'6.5', '7', '7.5', '8', '8.5', '9', '9.5', '10', '10.5', '11', '11.5','12','12.5','13','13.5','14','14.5','15'}\n size.set('6.5') # set the default option\n sizeMenu = OptionMenu(f4, size, '6.5', *sorted(sizes, key=float)) # numeric sort so '10' follows '9.5'\n Label(f4, text=\"Sizes\").grid(row=5, sticky=NSEW)\n sizeMenu.grid(row=6, sticky=NSEW)\n\n addShoe = Button(f4, text='Add Shoe')\n addShoe.bind(\"<Button-1>\", addSh)\n addShoe.grid(row=7, sticky=NSEW)\n\n sel2 = Button(f4, text='Select Shoe')\n sel2.bind(\"<Button-1>\", selectShoe)\n sel2.grid(row=8, sticky=NSEW)\n\n delShoe = Button(f4, text='Delete Shoe')\n delShoe.bind(\"<Button-1>\", deleteShoe)\n delShoe.grid(row=9, sticky=NSEW)\n\n 
f5 = ttk.Frame(n) # proxy list\n\n tv3 = Treeview(f5, selectmode=\"extended\") # proxy\n tv3.heading('#0', text='Proxy')\n tv3.column('#0', stretch=YES)\n tv3.grid(row=0, column=0, sticky='w')\n\n imp = Button(f5, text='Import Proxies')\n imp.bind(\"<Button-1>\", importProxy)\n imp.grid(row=2, sticky=NSEW)\n\n selProx = Button(f5, text='Select Proxy')\n selProx.bind(\"<Button-1>\", selectProxy)\n selProx.grid(row=3, sticky=NSEW)\n\n f6 = ttk.Frame(n)\n\n tv4 = Treeview(f6, columns=('user', 'pass', 'link', 'size', 'proxy'), selectmode=\"extended\")\n tv4.heading('#0', text='Website')\n tv4.column('#0', width=100)\n tv4.heading('#1', text='Username')\n tv4.column('#1', width=100)\n tv4.heading('#2', text='Password')\n tv4.column('#2', width=100)\n tv4.heading('#3', text='Link')\n tv4.column('#3', width=100)\n tv4.heading('#4', text='Size')\n tv4.column('#4', width=100)\n tv4.heading('#5', text='Proxy')\n tv4.column('#5', width=100)\n tv4.grid(row=0, column=0, columnspan=5, sticky=NSEW)\n\n addTsk = Button(f6, text='Add Task')\n addTsk.bind(\"<Button-1>\", addTask)\n addTsk.grid(row=1, sticky=NSEW)\n\n startTsk = Button(f6, text='Start Tasks')\n startTsk.bind(\"<Button-1>\", startTasks)\n startTsk.grid(row=2, sticky=NSEW)\n\n deleteTsk = Button(f6, text='Delete Task')\n deleteTsk.bind(\"<Button-1>\", deleteTask)\n deleteTsk.grid(row=3, sticky=NSEW)\n\n tv5 = Treeview(f6, columns=('email', 'task'), selectmode=\"extended\")\n tv5.heading('#0', text='Website')\n tv5.column('#0', width=200)\n tv5.heading('#1', text='Email')\n tv5.column('#1', width=200)\n tv5.heading('#2', text='Task')\n tv5.column('#2', width=200)\n tv5.grid(row=4, column=0, columnspan=3, sticky=NSEW)\n\n n.add(f1, text='Site Accounts')\n n.add(f4, text='Shoe Preferences')\n n.add(f5, text='Proxy Config')\n n.add(f2, text='Misc')\n n.add(f6, text='Task Monitor')\n\n n.grid()\n\n\ndef start():\n r = Toplevel()\n r.wm_title(\"CopBot\")\n my_gui = MainApplication(r)\n r.mainloop()\n\nclass LoginFrame(Frame):\n\n def __init__(self, master):\n super().__init__(master)\n self.label_1 = Label(self, text=\"Username\")\n self.label_2 = Label(self, text=\"Password\")\n\n self.entry_1 = Entry(self)\n self.entry_2 = Entry(self, show=\"*\")\n self.label_1.grid(row=0, sticky=E)\n self.label_2.grid(row=1, sticky=E)\n self.entry_1.grid(row=0, column=1)\n self.entry_2.grid(row=1, column=1)\n\n self.logbtn = Button(self, text=\"Login\")\n self.logbtn.bind(\"<Button-1>\", lambda event: self.logged(False)) # run the login handler on click\n self.logbtn.grid(columnspan=2)\n\n self.pack()\n\n def logged(self, useServer):\n username = self.entry_1.get()\n password = self.entry_2.get()\n if useServer:\n var = 'no'\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(('108.254.120.248', 1320))\n sock.send((username + \":\" + password).encode())\n while 1:\n var = sock.recv(1024).decode()\n break\n sock.close()\n if var == 'ok':\n tm.showinfo(\"Login info\", \"Welcome \"+username)\n self.destroy()\n start()\n else:\n tm.showerror(\"Login error\", \"Incorrect username, password or IP Address\")\n else:\n tm.showinfo(\"Login info\", \"Welcome \"+username)\n self.destroy()\n start()\n\nroot = Tk()\nlf = LoginFrame(root)\nroot.mainloop()\n", "sub_path": "GUI.py", "file_name": "GUI.py", "file_ext": "py", "file_size_in_byte": 24262, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 24, "usage_type": "call"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 54, 
"usage_type": "call"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 61, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 78, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 78, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 78, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 78, "usage_type": "name"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 79, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 85, "usage_type": "call"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 117, "usage_type": "call"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 125, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 144, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 184, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 184, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 185, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 185, "usage_type": "name"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 186, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 199, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 199, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 199, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 199, "usage_type": "name"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 200, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 207, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 238, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 255, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 255, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 256, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 256, "usage_type": "name"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 257, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 267, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 267, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 267, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 267, "usage_type": "name"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 268, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 323, "usage_type": "call"}, {"api_name": "selenium.webdriver.DesiredCapabilities", "line_number": 366, "usage_type": "attribute"}, {"api_name": 
"selenium.webdriver", "line_number": 366, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 374, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 374, "usage_type": "name"}, {"api_name": "selenium.webdriver.DesiredCapabilities", "line_number": 377, "usage_type": "attribute"}, {"api_name": "selenium.webdriver", "line_number": 377, "usage_type": "name"}, {"api_name": "selenium.webdriver.PhantomJS", "line_number": 385, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 385, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 391, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 391, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 392, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 396, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 407, "usage_type": "call"}, {"api_name": "tkinter.ttk.Notebook", "line_number": 421, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 421, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 423, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 423, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 467, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 467, "usage_type": "name"}, {"api_name": "time.grid", "line_number": 487, "usage_type": "call"}, {"api_name": "tkinter.ttk.Frame", "line_number": 491, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 491, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 532, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 532, "usage_type": "name"}, {"api_name": "tkinter.ttk.Frame", "line_number": 547, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 547, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 625, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 625, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 625, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 634, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 634, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 639, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 639, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 641, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 641, "usage_type": "name"}]} +{"seq_id": "72458113", "text": "\"\"\"\nDefines the common utility functions used in our applications.\n\"\"\"\n\nfrom django.apps import apps as django_apps\nfrom django.contrib.auth.models import Group\nfrom django.core.cache import cache\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.http import HttpRequest\nfrom django.utils.cache import get_cache_key\nfrom django.utils.module_loading import import_string\nfrom organizations.models import Organization\nfrom rest_framework import exceptions, serializers\n\nfrom backend import settings\n\n\ndef field_not_found_error():\n \"\"\"Generates error message for when a field does not exist.\"\"\"\n return \"Field not found.\"\n\n\ndef field_with_id_not_found_error(field_id):\n \"\"\"Generates error message for when a field does not exist.\"\"\"\n return \"Field with id={} not 
found.\".format(field_id)\n\n\ndef field_required_error():\n \"\"\"Generates error message for when a field is required.\"\"\"\n return \"This is a required field.\"\n\n\ndef field_invalid_error():\n \"\"\"Generates error message for when a field is invalid.\"\"\"\n return \"Invalid field.\"\n\n\ndef get_user_from_serializer(serializer, raise_exception=False):\n \"\"\"\n Returns the user from serializer context. Raises permission denied error\n if user is not found.\n \"\"\"\n\n # get user requesting for a new registration\n request_user = None\n request = serializer.context.get(\"request\")\n if request and hasattr(request, \"user\"):\n request_user = request.user\n else:\n if raise_exception:\n # raise unauthorized error if user is not found\n # most probably this will never get called\n raise exceptions.PermissionDenied()\n return request_user\n\n\ndef filter_queryset_by_lookup_list(query_set, lookup_list, lookup_param):\n \"\"\"\n Filters a queryset by the given list of lookup params\n \"\"\"\n query = '{}__in'.format(lookup_param)\n return query_set.filter(**{query: lookup_list})\n\n\ndef exclude_queryset_by_lookup_list(query_set, lookup_list, lookup_param):\n \"\"\"\n Filters a queryset by excluding the given list of lookup params\n \"\"\"\n query = '{}__in'.format(lookup_param)\n return query_set.exclude(**{query: lookup_list})\n\n\ndef filter_objects_by_lookup_list(objects, lookup_list, lookup_param):\n \"\"\"\n Filters the object by lookup list. If all lookup params in the list do not\n match, a not found exception is raised.\n \"\"\"\n try:\n filtered_objects = filter_queryset_by_lookup_list(\n objects, lookup_list, lookup_param)\n except Exception:\n raise exceptions.ValidationError(\n detail={lookup_param: \"Invalid list of parameters.\"})\n\n # make sure all the given ids are inside filtered objects,\n # otherwise raise a not found error\n if len(filtered_objects) != len(lookup_list):\n raise exceptions.NotFound(\n {\n 'id': 'The following requested ids are invalid: {}'.format(\n exclude_queryset_by_lookup_list(\n objects, lookup_list, lookup_param).values_list(\n lookup_param, flat=True))\n })\n return filtered_objects\n\n\ndef get_lookup_list(request, lookup_param):\n \"\"\"\n Returns the list of ids from API request.\n \"\"\"\n return request.query_params.getlist(lookup_param)\n\n\ndef get_user_group_model():\n try:\n return django_apps.get_model(settings.USER_GROUP_MODEL, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"USER_GROUP_MODEL must be of the form 'app_label.model_name'\")\n except LookupError:\n raise ImproperlyConfigured(\n \"USER_GROUP_MODEL refers to model '%s' that has not been installed\" % settings.USER_GROUP_MODEL\n )\n\n\ndef get_organization_model():\n try:\n return django_apps.get_model(settings.ORGANIZATION_MODEL, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_MODEL must be of the form 'app_label.model_name'\")\n except LookupError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_MODEL refers to model '%s' that has not been installed\" % settings.ORGANIZATION_MODEL\n )\n\n\ndef get_organization_user_model():\n try:\n return django_apps.get_model(settings.ORGANIZATION_USER_MODEL, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_USER_MODEL must be of the form 'app_label.model_name'\")\n except LookupError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_USER_MODEL refers to model '%s' that has not been installed\" % settings.ORGANIZATION_USER_MODEL\n 
)\n\n\ndef get_organization_owner_model():\n try:\n return django_apps.get_model(settings.ORGANIZATION_OWNER_MODEL, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_OWNER_MODEL must be of the form 'app_label.model_name'\")\n except LookupError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_OWNER_MODEL refers to model '%s' that has not been installed\" % settings.ORGANIZATION_OWNER_MODEL\n )\n\n\ndef get_organization_group_model():\n try:\n return django_apps.get_model(settings.ORGANIZATION_GROUP_MODEL, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_GROUP_MODEL must be of the form 'app_label.model_name'\")\n except LookupError:\n raise ImproperlyConfigured(\n \"ORGANIZATION_GROUP_MODEL refers to model '%s' that has not been installed\" % settings.ORGANIZATION_GROUP_MODEL\n )\n\n\ndef get_organization_auth_backend():\n auth_backend = import_string(settings.ORGANIZATION_USER_AUTH_BACKEND)\n if not auth_backend:\n raise ImproperlyConfigured(\n 'No authentication backends have been defined. Does '\n 'ORGANIZATION_USER_AUTH_BACKEND contain anything?'\n )\n return auth_backend\n", "sub_path": "core/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5976, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 53, "usage_type": "call"}, {"api_name": "rest_framework.exceptions", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.NotFound", "line_number": 85, "usage_type": "call"}, {"api_name": "rest_framework.exceptions", "line_number": 85, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 94, "usage_type": "call"}, {"api_name": "rest_framework.exceptions", "line_number": 94, "usage_type": "name"}, {"api_name": "django.apps.apps.get_model", "line_number": 107, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 107, "usage_type": "name"}, {"api_name": "backend.settings.USER_GROUP_MODEL", "line_number": 107, "usage_type": "attribute"}, {"api_name": "backend.settings", "line_number": 107, "usage_type": "name"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 109, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 112, "usage_type": "call"}, {"api_name": "backend.settings.USER_GROUP_MODEL", "line_number": 113, "usage_type": "attribute"}, {"api_name": "backend.settings", "line_number": 113, "usage_type": "name"}, {"api_name": "django.apps.apps.get_model", "line_number": 119, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 119, "usage_type": "name"}, {"api_name": "backend.settings.ORGANIZATION_MODEL", "line_number": 119, "usage_type": "attribute"}, {"api_name": "backend.settings", "line_number": 119, "usage_type": "name"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 121, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 124, "usage_type": "call"}, {"api_name": "backend.settings.ORGANIZATION_MODEL", "line_number": 125, "usage_type": "attribute"}, {"api_name": "backend.settings", "line_number": 125, "usage_type": "name"}, {"api_name": "django.apps.apps.get_model", "line_number": 131, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 131, "usage_type": "name"}, {"api_name": 
"backend.settings.ORGANIZATION_USER_MODEL", "line_number": 131, "usage_type": "attribute"}, {"api_name": "backend.settings", "line_number": 131, "usage_type": "name"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 133, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 136, "usage_type": "call"}, {"api_name": "backend.settings.ORGANIZATION_USER_MODEL", "line_number": 137, "usage_type": "attribute"}, {"api_name": "backend.settings", "line_number": 137, "usage_type": "name"}, {"api_name": "django.apps.apps.get_model", "line_number": 143, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 143, "usage_type": "name"}, {"api_name": "backend.settings.ORGANIZATION_OWNER_MODEL", "line_number": 143, "usage_type": "attribute"}, {"api_name": "backend.settings", "line_number": 143, "usage_type": "name"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 145, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 148, "usage_type": "call"}, {"api_name": "backend.settings.ORGANIZATION_OWNER_MODEL", "line_number": 149, "usage_type": "attribute"}, {"api_name": "backend.settings", "line_number": 149, "usage_type": "name"}, {"api_name": "django.apps.apps.get_model", "line_number": 155, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 155, "usage_type": "name"}, {"api_name": "backend.settings.ORGANIZATION_GROUP_MODEL", "line_number": 155, "usage_type": "attribute"}, {"api_name": "backend.settings", "line_number": 155, "usage_type": "name"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 157, "usage_type": "call"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 160, "usage_type": "call"}, {"api_name": "backend.settings.ORGANIZATION_GROUP_MODEL", "line_number": 161, "usage_type": "attribute"}, {"api_name": "backend.settings", "line_number": 161, "usage_type": "name"}, {"api_name": "django.utils.module_loading.import_string", "line_number": 166, "usage_type": "call"}, {"api_name": "backend.settings.ORGANIZATION_USER_AUTH_BACKEND", "line_number": 166, "usage_type": "attribute"}, {"api_name": "backend.settings", "line_number": 166, "usage_type": "name"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "156644010", "text": "from selenium import webdriver\nimport time\n\ntry:\n browser = webdriver.Chrome(\"chromedriver.exe\")\nexcept:\n print(\"Error!\")\n\nbrowser.get(\"https://www.forbes.com/powerful-brands/list/#tab:rank\")\nbrowser.maximize_window()\ntime.sleep(3)\nbrowser.get(\n \"https://www.forbes.com/companies/apple/?list=powerful-brands#2cc564585355\")\ntime.sleep(3)\nbrowser.back()\ntime.sleep(10)\nbrowser.quit()\n", "sub_path": "learn/dd/slnm1.py", "file_name": "slnm1.py", "file_ext": "py", "file_size_in_byte": 391, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 5, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 5, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 11, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 14, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "568303449", "text": "import pytest\r\n\r\nfrom lib import consts\r\nfrom lib.log_parser import 
LogParser\r\nfrom lib.environment import Env\r\nfrom tests.tests_old.install.installbase import InstallBase\r\n\r\ndesired_backup_mode = None\r\n\r\n\r\nclass TestGICreateAccount(InstallBase):\r\n\r\n @pytest.mark.skipif(InstallBase.SERVER_EDITION, reason='Personal not supported on server editions')\r\n def test_create_account_through_service(self):\r\n \"\"\"\r\n @TITLE\r\n Create an account from the service\r\n\r\n @DESCRIPTION\r\n Create an account from the service after the Generic Install has laid down the bits and the service and UI are up\r\n\r\n @PRECONDITIONS\r\n Carbonite is not installed on PC\r\n\r\n @PROCEDURE\r\n 1. Install the generic installer(version=latest)\r\n 2. Hit the Service to create an account\r\n 3. Verify that the account is created and reguid is received\r\n\r\n @PASS FAIL CRITERIA\r\n PASS criteria: 1. Account created\r\n 2. Reguid received\r\n\r\n FAIL criteria: Any of the above PASS criteria condition fails\r\n \"\"\"\r\n self.log.info(\"Installing the Latest version of the Generic Installer with NO REGUID.\")\r\n self.install(account=consts.personal_account, generic=True, cluster=consts.endpoint_stabilityGI_build)\r\n #if the Policy text exist in the registry (means missed account creation page)\r\n #if the string \"Change to page 2 phase 500\" not exist in CarboniteUI.log (means missed account creation page)\r\n if (self.check_policy_text_exist() or\r\n not LogParser(consts.win_ui_log_file_path).text_exists(consts.create_account_page_flag)):\r\n raise RuntimeError(\"Generic Installer missed account creation page\")\r\n home_server, membership_server, portal_server, registration_guid, creds = self.create_account_from_service(Env().portal)\r\n\r\n # these are the same until the account logs in using the sign-on page\r\n assert home_server == \"{0}.{1}\".format(Env().download_host, Env().download_domain), \\\r\n 'HomeServer is {0}, should be {1}.{2}'.format(home_server, Env().download_host, Env().download_domain)\r\n assert membership_server == \"{0}.{1}\".format(Env().download_host, Env().download_domain),\\\r\n 'MembershipServer is {0}, should be {1}.{2}'.format(membership_server, Env().download_host, Env().download_domain)\r\n assert portal_server == \"{0}.{1}\".format(Env().download_host, Env().download_domain),\\\r\n 'Portal server is {0}, should be {1}.{2}'.format(portal_server, Env().download_host, Env().download_domain)\r\n assert len(registration_guid) > 0, 'Registration GUID is 0 length string'\r\n #just log this for now, will use these credentials to login when sign-on page is implemented\r\n self.log.info(creds)\r\n", "sub_path": "Chandan Singh/headless-automation__14sep/tests/tests_old/install/test_GI_create_account.py", "file_name": "test_GI_create_account.py", "file_ext": "py", "file_size_in_byte": 2828, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "tests.tests_old.install.installbase.InstallBase", "line_number": 11, "usage_type": "name"}, {"api_name": "lib.consts.personal_account", "line_number": 37, "usage_type": "attribute"}, {"api_name": "lib.consts", "line_number": 37, "usage_type": "name"}, {"api_name": "lib.consts.endpoint_stabilityGI_build", "line_number": 37, "usage_type": "attribute"}, {"api_name": "lib.log_parser.LogParser", "line_number": 41, "usage_type": "call"}, {"api_name": "lib.consts.win_ui_log_file_path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "lib.consts", "line_number": 41, "usage_type": "name"}, {"api_name": 
"lib.consts.create_account_page_flag", "line_number": 41, "usage_type": "attribute"}, {"api_name": "lib.environment.Env", "line_number": 43, "usage_type": "call"}, {"api_name": "lib.environment.Env", "line_number": 46, "usage_type": "call"}, {"api_name": "lib.environment.Env", "line_number": 47, "usage_type": "call"}, {"api_name": "lib.environment.Env", "line_number": 48, "usage_type": "call"}, {"api_name": "lib.environment.Env", "line_number": 49, "usage_type": "call"}, {"api_name": "lib.environment.Env", "line_number": 50, "usage_type": "call"}, {"api_name": "lib.environment.Env", "line_number": 51, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tests.tests_old.install.installbase.InstallBase.SERVER_EDITION", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tests.tests_old.install.installbase.InstallBase", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "161955870", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('frame', '0022_auto_20181211_1605'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='scope',\n name='ops_main',\n field=models.ForeignKey(related_name='scopes_ops_main', verbose_name='\\u8fd0\\u7ef4\\u4eba\\u5458\\uff08\\u4e3b\\uff09', blank=True, to=settings.AUTH_USER_MODEL, null=True),\n ),\n migrations.AddField(\n model_name='scope',\n name='ops_stan',\n field=models.ForeignKey(related_name='scopes_ops_stan', verbose_name='\\u8fd0\\u7ef4\\u4eba\\u5458\\uff08\\u5907\\uff09', blank=True, to=settings.AUTH_USER_MODEL, null=True),\n ),\n ]\n", "sub_path": "WiseEyeIAMService/frame/migrations/0023_auto_20181212_1517.py", "file_name": "0023_auto_20181212_1517.py", "file_ext": "py", "file_size_in_byte": 909, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.swappable_dependency", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": 
"django.conf.settings.AUTH_USER_MODEL", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "238675206", "text": "from starlette.endpoints import HTTPEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import RedirectResponse\nfrom starlette.templating import Jinja2Templates\n\nfrom psion.webtools import urlencode\n\nfrom example.provider import provider\nfrom example.settings import BASEDIR\n\n\ntemplates = Jinja2Templates(BASEDIR / \"templates\")\n\n\nclass AuthorizationEndpoint(HTTPEndpoint):\n async def get(self, request: Request):\n if not request.user:\n url = urlencode(str(request.url_for(\"auth:login\")), next=str(request.url))\n return RedirectResponse(url, 303)\n\n request = await provider.create_request(request)\n response = await provider.authorize(request)\n return await provider.create_response(response)\n\n\nclass ErrorEndpoint(HTTPEndpoint):\n async def get(self, request: Request):\n error_description = request.query_params.get(\"error_description\")\n error = request.query_params.get(\"error\")\n\n return templates.TemplateResponse(\n \"error.j2\",\n {\n \"request\": request,\n \"title\": \"Error\",\n \"error_description\": error_description,\n \"error\": error,\n },\n )\n\n\nclass RevocationEndpoint(HTTPEndpoint):\n async def post(self, request: Request):\n request = await provider.create_request(request)\n response = await provider.revoke(request)\n return await provider.create_response(response)\n\n\nclass TokenEndpoint(HTTPEndpoint):\n async def post(self, request: Request):\n request = await provider.create_request(request)\n response = await provider.token(request)\n return await provider.create_response(response)\n", "sub_path": "example/views/connect.py", "file_name": "connect.py", "file_ext": "py", "file_size_in_byte": 1727, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "starlette.templating.Jinja2Templates", "line_number": 12, "usage_type": "call"}, {"api_name": "example.settings.BASEDIR", "line_number": 12, "usage_type": "name"}, {"api_name": "starlette.endpoints.HTTPEndpoint", "line_number": 15, "usage_type": "name"}, {"api_name": "starlette.requests.Request", "line_number": 16, "usage_type": "name"}, {"api_name": "psion.webtools.urlencode", "line_number": 18, "usage_type": "call"}, {"api_name": "starlette.responses.RedirectResponse", "line_number": 19, "usage_type": "call"}, {"api_name": "example.provider.provider.create_request", "line_number": 21, "usage_type": "call"}, {"api_name": "example.provider.provider", "line_number": 21, "usage_type": "name"}, {"api_name": "example.provider.provider.authorize", "line_number": 22, "usage_type": "call"}, {"api_name": "example.provider.provider", "line_number": 22, "usage_type": "name"}, {"api_name": "example.provider.provider.create_response", "line_number": 23, "usage_type": "call"}, {"api_name": "example.provider.provider", "line_number": 23, "usage_type": "name"}, {"api_name": "starlette.endpoints.HTTPEndpoint", "line_number": 26, "usage_type": "name"}, {"api_name": "starlette.requests.Request", "line_number": 27, "usage_type": "name"}, {"api_name": "starlette.endpoints.HTTPEndpoint", "line_number": 42, "usage_type": "name"}, {"api_name": "starlette.requests.Request", "line_number": 43, "usage_type": "name"}, {"api_name": "example.provider.provider.create_request", "line_number": 44, "usage_type": "call"}, {"api_name": 
"example.provider.provider", "line_number": 44, "usage_type": "name"}, {"api_name": "example.provider.provider.revoke", "line_number": 45, "usage_type": "call"}, {"api_name": "example.provider.provider", "line_number": 45, "usage_type": "name"}, {"api_name": "example.provider.provider.create_response", "line_number": 46, "usage_type": "call"}, {"api_name": "example.provider.provider", "line_number": 46, "usage_type": "name"}, {"api_name": "starlette.endpoints.HTTPEndpoint", "line_number": 49, "usage_type": "name"}, {"api_name": "starlette.requests.Request", "line_number": 50, "usage_type": "name"}, {"api_name": "example.provider.provider.create_request", "line_number": 51, "usage_type": "call"}, {"api_name": "example.provider.provider", "line_number": 51, "usage_type": "name"}, {"api_name": "example.provider.provider.token", "line_number": 52, "usage_type": "call"}, {"api_name": "example.provider.provider", "line_number": 52, "usage_type": "name"}, {"api_name": "example.provider.provider.create_response", "line_number": 53, "usage_type": "call"}, {"api_name": "example.provider.provider", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "110247693", "text": "import os, datetime, time, argparse\nimport math, cv2, numpy as np\nimport data, model\nimport torch\nimport torch.nn.functional as F\nimport misc.flow_vis as fv\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--test_name', type=str, required=True)\nparser.add_argument('--data_path', type=str, required=True)\nparser.add_argument('--model_path', type=str, required=True)\nparser.add_argument('--batch_size', type=int, default=1)\nparser.add_argument('--hr_size', type=int, default=720)\nparser.add_argument('--lr_size', type=int, default=180)\nparser.add_argument('--blur', action='store_true')\nparser.add_argument('--save_lr', action='store_true')\nparser.add_argument('--save_hr', action='store_true')\nparser.add_argument('--save_flows', action='store_true')\nparser.add_argument('--save_warps', action='store_true')\nargs = parser.parse_args()\n\n## MODEL LOADING\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ncp_dict = torch.load(args.model_path, map_location=device)\nflow, inter = cp_dict['flow_net'], cp_dict['inter_net']\ndelta = cp_dict['delta']\nC = cp_dict['channels']\n\nflow_c_in = 2 * C\nflow_c_out = 2\ninter_c_in = 4 * C * delta + 2 * C + 4 * delta + 2\ninter_c_out = C\n\nflow_net = getattr(__import__('models.' + flow, fromlist=[flow]), flow)(flow_c_in, flow_c_out)\ninter_net = getattr(__import__('models.' 
+ inter, fromlist=[inter]), inter)(inter_c_in, inter_c_out)\nsr_model = model.SuperResModel(delta, flow_net, inter_net)\nsr_model.load_state_dict(cp_dict['state_dict'])\n\nif torch.cuda.device_count() > 1:\n\tsr_model = torch.nn.DataParallel(sr_model)\nsr_model.to(device)\n\ngridX, gridY = np.meshgrid(np.arange(args.lr_size), np.arange(args.lr_size))\nnum_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1\ngridX = torch.tensor(gridX, requires_grad=False, device=device).repeat(num_devices, 1, 1)\ngridY = torch.tensor(gridY, requires_grad=False, device=device).repeat(num_devices, 1, 1)\n\n## DATA LOADING\n\nprint('Loading test data...')\n\ntestset = data.SuperRes(args.data_path, 'convert', delta, 0, 0, C)\ntestloader = torch.utils.data.DataLoader(testset, batch_size = args.batch_size, shuffle = False, num_workers = 4)\n\nprint('Loaded ' + str(len(testset)) + ' test examples')\n\n## TEST LOOP\n\nif args.save_lr:\n\tos.makedirs(os.path.join('converted', args.test_name, 'lr'), exist_ok=True)\nif args.save_hr:\n\tos.makedirs(os.path.join('converted', args.test_name, 'hr'), exist_ok=True)\nif args.save_flows:\n\tfor i in range(2 * delta + 1):\n\t\tos.makedirs(os.path.join('converted', args.test_name, 'flows_%02d' % i), exist_ok=True)\nif args.save_warps:\n\tfor i in range(2 * delta + 1):\n\t\tos.makedirs(os.path.join('converted', args.test_name, 'warps_%02d' % i), exist_ok=True)\nos.makedirs(os.path.join('converted', args.test_name, 'pred'), exist_ok=True)\n\ndef gaussian_kernel(size, sigma=2., dim=2, channels=3):\n kernel_size = 2*size + 1\n kernel_size = [kernel_size] * dim\n sigma = [sigma] * dim\n kernel = 1\n meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])\n for size, std, mgrid in zip(kernel_size, sigma, meshgrids):\n mean = (size - 1) / 2\n kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-((mgrid - mean) / (2 * std)) ** 2)\n kernel = kernel / torch.sum(kernel)\n kernel = kernel.view(1, 1, *kernel.size())\n kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))\n return kernel\n\ngaussian_filter = gaussian_kernel(1, sigma=1.5, channels=C).to(device)\n\ndef write_data(test_frames, output, test_index, C):\n\tfor i in range(output.shape[0]):\n\t\tif args.save_lr:\n\t\t\timg_lr = np.moveaxis(input[i, delta].cpu().numpy(), 0, 2) * 255\n\t\t\tcv2.imwrite(os.path.join('converted', args.test_name, 'lr', 'frame%06d.jpg' % (test_index * args.batch_size + i + 1)), img_lr)\n\t\tif args.save_hr:\n\t\t\timg_hr = np.moveaxis(test_frames[i, delta].cpu().numpy(), 0, 2) * 255\n\t\t\tcv2.imwrite(os.path.join('converted', args.test_name, 'hr', 'frame%06d.jpg' % (test_index * args.batch_size + i + 1)), img_hr)\n\t\timg_pred = np.moveaxis(output[i, :C].cpu().numpy(), 0, -1) * 255\n\t\tcv2.imwrite(os.path.join('converted', args.test_name, 'pred', 'frame%06d.jpg' % (test_index * args.batch_size + i + 1)), img_pred)\n\t\tif args.save_flows:\n\t\t\tfor f in range(2 * delta + 1):\n\t\t\t\tuv = np.moveaxis(output[i, C + f * 2 : C + (f + 1) * 2].cpu().numpy(), 0, -1)\n\t\t\t\tflow_img = fv.flow_to_color(uv, convert_to_bgr=True)\n\t\t\t\tcv2.imwrite(os.path.join('converted', args.test_name, 'flows_%02d' % f, 'frame%06d.jpg' % (test_index * args.batch_size + i + 1)), flow_img)\n\t\tif args.save_warps:\n\t\t\tfirst_c = C if not args.save_flows else (C + 2 * (2 * delta + 1))\n\t\t\tfor f in range(2 * delta + 1):\n\t\t\t\twarp_img = np.moveaxis(output[i, first_c + f * C : first_c + (f + 1) * C].cpu().numpy(), 0, -1) * 
255\n\t\t\t\tcv2.imwrite(os.path.join('converted', args.test_name, 'warps_%02d' % f, 'frame%06d.jpg' % (test_index * args.batch_size + i + 1)), warp_img)\n\nprint('Converting...')\n\nwith torch.no_grad():\n\tfor test_index, test_frames in enumerate(testloader, 0):\n\t\tif test_index % int(len(testloader) / 100) == 0:\n\t\t\tprint('.', end='', flush=True)\n\t\tB, NF, C, H_hr, W_hr = test_frames.shape\n\t\ttest_frames = test_frames.to(device)\n\t\tinput = test_frames.view(B * NF, C, H_hr, W_hr)\n\t\tif args.blur:\n\t\t\tinput = F.conv2d(input, gaussian_filter, padding=1, groups=C)\n\t\tinput = F.interpolate(input, size=args.lr_size, mode='area')\n\t\tinput = input.view(B, NF, C, args.lr_size, args.lr_size)\n\t\toutput = sr_model(input, args.hr_size, gridX, gridY, out_flows=args.save_flows, out_warps=args.save_warps)\n\t\twrite_data(test_frames, output, test_index, C)\n", "sub_path": "convert_video.py", "file_name": "convert_video.py", "file_ext": "py", "file_size_in_byte": 5610, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 26, "usage_type": "call"}, {"api_name": "model.SuperResModel", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.meshgrid", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.cuda.device_count", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 48, "usage_type": "call"}, {"api_name": "data.SuperRes", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 71, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.meshgrid", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 78, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 81, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.exp", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.moveaxis", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.moveaxis", "line_number": 95, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.moveaxis", "line_number": 97, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "numpy.moveaxis", "line_number": 101, "usage_type": "call"}, {"api_name": "misc.flow_vis.flow_to_color", "line_number": 102, "usage_type": "call"}, {"api_name": "misc.flow_vis", "line_number": 102, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.moveaxis", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn.functional.conv2d", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 121, "usage_type": "name"}]} +{"seq_id": "237930868", "text": "import numpy as np\nimport os\nimport SimpleITK as sitk\n# import nibabel as nib\nimport pandas as pd\nimport copy\nimport PySimpleGUI as gui\nimport matplotlib.pyplot as plt\nimport glob\nimport sys\nfrom myutil.myutil import load_itk, get_gdth_pred_names, one_hot_encode_3d\n\n\n# %%\ndef show_itk(itk, idx):\n ref_surface_array = sitk.GetArrayViewFromImage(itk)\n plt.figure()\n plt.imshow(ref_surface_array[idx])\n plt.show()\n\n return None\n\n\ndef computeQualityMeasures(lP, lT, spacing, metrics_type=None):\n \"\"\"\n\n :param lP: prediction, shape (x, y, z)\n :param lT: ground truth, shape (x, y, z)\n :param spacing: shape order (x, y, z)\n :return: quality: dict contains metircs\n \"\"\"\n quality = {}\n labelPred = sitk.GetImageFromArray(lP, isVector=False)\n labelPred.SetSpacing(spacing)\n labelTrue = sitk.GetImageFromArray(lT, isVector=False)\n labelTrue.SetSpacing(spacing) 
# spacing order (x, y, z)\n\n voxel_metrics = ['dice', 'jaccard', 'precision', 'recall', 'fpr', 'fnr', 'vs']\n distance_metrics = ['hd', 'hd95', 'msd', 'mdsd', 'stdsd']\n metrics_type = set([]) if metrics_type is None else set(metrics_type)\n # to save time, we need to determine which metrics we need to compute\n if set(voxel_metrics).intersection(metrics_type) or not metrics_type:\n pred = lP.astype(int) # float data does not support bit_and and bit_or\n gdth = lT.astype(int) # float data does not support bit_and and bit_or\n fp_array = copy.deepcopy(pred) # keep pred unchanged\n fn_array = copy.deepcopy(gdth)\n gdth_sum = np.sum(gdth)\n pred_sum = np.sum(pred)\n intersection = gdth & pred\n union = gdth | pred\n intersection_sum = np.count_nonzero(intersection)\n union_sum = np.count_nonzero(union)\n\n tp_array = intersection\n\n tmp = pred - gdth\n fp_array[tmp < 1] = 0\n\n tmp2 = gdth - pred\n fn_array[tmp2 < 1] = 0\n\n tn_array = np.ones(gdth.shape) - union\n\n tp, fp, fn, tn = np.sum(tp_array), np.sum(fp_array), np.sum(fn_array), np.sum(tn_array)\n\n smooth = 0.001\n precision = tp / (pred_sum + smooth)\n recall = tp / (gdth_sum + smooth)\n\n false_positive_rate = fp / (fp + tn + smooth)\n false_negtive_rate = fn / (fn + tp + smooth)\n\n jaccard = intersection_sum / (union_sum + smooth)\n dice = 2 * intersection_sum / (gdth_sum + pred_sum + smooth)\n\n dicecomputer = sitk.LabelOverlapMeasuresImageFilter()\n dicecomputer.Execute(labelTrue > 0.5, labelPred > 0.5)\n\n quality[\"dice\"] = dice\n quality[\"jaccard\"] = jaccard\n quality[\"precision\"] = precision\n quality[\"recall\"] = recall\n quality[\"false_negtive_rate\"] = false_negtive_rate\n quality[\"false_positive_rate\"] = false_positive_rate\n quality[\"volume_similarity\"] = dicecomputer.GetVolumeSimilarity()\n\n if set(distance_metrics).intersection(metrics_type) or not metrics_type:\n slice_idx = 300\n # Surface distance measures\n signed_distance_map = sitk.SignedMaurerDistanceMap(labelTrue > 0.5, squaredDistance=False,\n useImageSpacing=True) # It need to be adapted.\n # show_itk(signed_distance_map, slice_idx)\n\n ref_distance_map = sitk.Abs(signed_distance_map)\n # show_itk(ref_distance_map, slice_idx)\n\n ref_surface = sitk.LabelContour(labelTrue > 0.5, fullyConnected=True)\n # show_itk(ref_surface, slice_idx)\n ref_surface_array = sitk.GetArrayViewFromImage(ref_surface)\n\n statistics_image_filter = sitk.StatisticsImageFilter()\n statistics_image_filter.Execute(ref_surface > 0.5)\n\n num_ref_surface_pixels = int(statistics_image_filter.GetSum())\n\n signed_distance_map_pred = sitk.SignedMaurerDistanceMap(labelPred > 0.5, squaredDistance=False,\n useImageSpacing=True)\n # show_itk(signed_distance_map_pred, slice_idx)\n\n seg_distance_map = sitk.Abs(signed_distance_map_pred)\n # show_itk(seg_distance_map, slice_idx)\n\n seg_surface = sitk.LabelContour(labelPred > 0.5, fullyConnected=True)\n # show_itk(seg_surface, slice_idx)\n seg_surface_array = sitk.GetArrayViewFromImage(seg_surface)\n\n seg2ref_distance_map = ref_distance_map * sitk.Cast(seg_surface, sitk.sitkFloat32)\n # show_itk(seg2ref_distance_map, slice_idx)\n\n ref2seg_distance_map = seg_distance_map * sitk.Cast(ref_surface, sitk.sitkFloat32)\n # show_itk(ref2seg_distance_map, slice_idx)\n\n statistics_image_filter.Execute(seg_surface > 0.5)\n\n num_seg_surface_pixels = int(statistics_image_filter.GetSum())\n\n seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)\n seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr 
!= 0])\n seg2ref_distances = seg2ref_distances + list(np.zeros(num_seg_surface_pixels - len(seg2ref_distances)))\n ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)\n ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0])\n ref2seg_distances = ref2seg_distances + list(np.zeros(num_ref_surface_pixels - len(ref2seg_distances))) #\n\n all_surface_distances = seg2ref_distances + ref2seg_distances\n quality[\"mean_surface_distance\"] = np.mean(all_surface_distances)\n quality[\"median_surface_distance\"] = np.median(all_surface_distances)\n quality[\"std_surface_distance\"] = np.std(all_surface_distances)\n quality[\"95_surface_distance\"] = np.percentile(all_surface_distances, 95)\n quality[\"Hausdorff\"] = np.max(all_surface_distances)\n\n return quality\n\n\ndef get_metrics_dict_all_labels(labels, gdth, pred, spacing, metrics_type=None):\n \"\"\"\n\n :param metrics_type:\n :param labels: not include background, e.g. [4,5,6,7,8] or [1]\n :param gdth: shape: (x, y, z, channels), channels is equal to len(labels) or equal to len(labels)+1 (background)\n :param pred: the same as above\n :param spacing: spacing order should be (x, y, z) !!!\n :return: metrics_dict_all_labels a dict which contain all metrics\n \"\"\"\n metrics_parameters_dict = {}\n\n Hausdorff_list = []\n Dice_list = []\n Jaccard_list = []\n Volume_list = []\n mean_surface_dis_list = []\n median_surface_dis_list = []\n std_surface_dis_list = []\n nine5_surface_dis_list = []\n precision_list = []\n recall_list = []\n false_positive_rate_list = []\n false_negtive_rate_list = []\n\n for i, label in enumerate(labels):\n print('start get metrics for label: ', label)\n pred_per = pred[..., i] # select onlabel\n gdth_per = gdth[..., i]\n\n metrics = computeQualityMeasures(pred_per, gdth_per, spacing=spacing, metrics_type=metrics_type)\n print(metrics)\n\n Dice_list.append(metrics[\"dice\"])\n Jaccard_list.append(metrics[\"jaccard\"])\n precision_list.append(metrics[\"precision\"])\n recall_list.append(metrics[\"recall\"])\n false_negtive_rate_list.append(metrics[\"false_negtive_rate\"])\n false_positive_rate_list.append(metrics[\"false_positive_rate\"])\n Volume_list.append(metrics[\"volume_similarity\"])\n\n mean_surface_dis_list.append(metrics[\"mean_surface_distance\"])\n median_surface_dis_list.append(metrics[\"median_surface_distance\"])\n std_surface_dis_list.append(metrics[\"std_surface_distance\"])\n nine5_surface_dis_list.append(metrics[\"95_surface_distance\"])\n Hausdorff_list.append(metrics[\"Hausdorff\"])\n\n metrics_dict_all_labels = {'dice': Dice_list,\n 'jaccard': Jaccard_list,\n 'precision': precision_list,\n 'recall': recall_list,\n 'fpr': false_positive_rate_list,\n 'fnr': false_negtive_rate_list,\n 'vs': Volume_list,\n 'hd': Hausdorff_list,\n 'msd': mean_surface_dis_list,\n 'mdsd': median_surface_dis_list,\n 'stdsd': std_surface_dis_list,\n 'hd95': nine5_surface_dis_list}\n\n metrics_dict = {k: v for k, v in metrics_dict_all_labels.items() if v} # remove empty values\n\n return metrics_dict\n\n\ndef write_metrics(labels, gdth_path, pred_path, csv_file, metrics=None):\n \"\"\"\n\n :param labels: exclude background\n :param gdth_path: a absolute directory path or file name\n :param pred_path: a absolute directory path or file name\n :param csv_file: filename to save the metrics\n :return: metrics_dict_all_labels: a dict which save metrics\n \"\"\"\n print('start calculate all metrics (volume and distance) and write them to csv')\n if '/' not in 
gdth_path.split('.')[-1]: # gdth is a file instead of a directory\n gdth_names, pred_names = [gdth_path], [pred_path]\n else:\n gdth_names, pred_names = get_gdth_pred_names(gdth_path, pred_path)\n\n for gdth_name, pred_name in zip(gdth_names, pred_names):\n gdth, gdth_origin, gdth_spacing = load_itk(gdth_name)\n pred, pred_origin, pred_spacing = load_itk(pred_name)\n\n gdth = one_hot_encode_3d(gdth, labels=labels)\n pred = one_hot_encode_3d(pred, labels=labels)\n print('start calculate all metrics for image: ', pred_name)\n metrics_dict_all_labels = get_metrics_dict_all_labels(labels, gdth, pred, spacing=gdth_spacing[::-1],\n metrics_type=metrics)\n metrics_dict_all_labels['filename'] = pred_name # add a new key to the metrics\n data_frame = pd.DataFrame(metrics_dict_all_labels)\n data_frame.to_csv(csv_file, mode='a', header=not os.path.exists(csv_file), index=False)\n\n return metrics_dict_all_labels\n\n\ndef main():\n labels = [0, 4, 5, 6, 7, 8]\n gdth_path = 'data/gdth'\n pred_path = 'data/pred'\n csv_file = 'metrics.csv'\n\n write_metrics(labels=labels[1:], # exclude background\n gdth_path=gdth_path,\n pred_path=pred_path,\n csv_file=csv_file)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "seg_metrics/seg_metrics.py", "file_name": "seg_metrics.py", "file_ext": "py", "file_size_in_byte": 10328, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "SimpleITK.GetArrayViewFromImage", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "SimpleITK.GetImageFromArray", "line_number": 33, "usage_type": "call"}, {"api_name": "SimpleITK.GetImageFromArray", "line_number": 35, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 45, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 64, "usage_type": "call"}, {"api_name": "SimpleITK.LabelOverlapMeasuresImageFilter", "line_number": 76, "usage_type": "call"}, {"api_name": "SimpleITK.SignedMaurerDistanceMap", "line_number": 90, "usage_type": "call"}, {"api_name": "SimpleITK.Abs", "line_number": 94, "usage_type": "call"}, {"api_name": "SimpleITK.LabelContour", "line_number": 97, "usage_type": "call"}, {"api_name": "SimpleITK.GetArrayViewFromImage", "line_number": 99, "usage_type": "call"}, {"api_name": "SimpleITK.StatisticsImageFilter", "line_number": 101, "usage_type": "call"}, {"api_name": "SimpleITK.SignedMaurerDistanceMap", "line_number": 106, "usage_type": "call"}, {"api_name": "SimpleITK.Abs", "line_number": 110, "usage_type": "call"}, {"api_name": "SimpleITK.LabelContour", "line_number": 113, "usage_type": "call"}, {"api_name": "SimpleITK.GetArrayViewFromImage", "line_number": 115, "usage_type": "call"}, {"api_name": "SimpleITK.Cast", "line_number": 117, "usage_type": "call"}, {"api_name": "SimpleITK.sitkFloat32", "line_number": 117, "usage_type": "attribute"}, {"api_name": "SimpleITK.Cast", "line_number": 120, "usage_type": "call"}, {"api_name": "SimpleITK.sitkFloat32", "line_number": 120, "usage_type": "attribute"}, {"api_name": "SimpleITK.GetArrayViewFromImage", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 129, "usage_type": "call"}, {"api_name": "SimpleITK.GetArrayViewFromImage", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 139, "usage_type": "call"}, {"api_name": "myutil.myutil.get_gdth_pred_names", "line_number": 222, "usage_type": "call"}, {"api_name": "myutil.myutil.load_itk", "line_number": 225, "usage_type": "call"}, {"api_name": "myutil.myutil.load_itk", "line_number": 226, "usage_type": "call"}, {"api_name": "myutil.myutil.one_hot_encode_3d", "line_number": 228, "usage_type": "call"}, {"api_name": "myutil.myutil.one_hot_encode_3d", "line_number": 229, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path", "line_number": 235, "usage_type": "attribute"}]} +{"seq_id": "213597222", "text": "\nimport os\nfrom flask import Flask, request, redirect, url_for, render_template\nfrom werkzeug.utils import secure_filename\n\nUPLOAD_FOLDER = '/home/user/Desktop/Joon/Uploads'\nALLOWED_EXTENSIONS = set(['pdf', 'png', 'jpg', 'jpeg', 'gif', 'txt'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n@app.route('/upload', methods = ['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n f = request.files['file']\n # save once, under a sanitized filename, into the configured upload folder\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(f.filename)))\n return 'file uploaded successfully'\n\"\"\"\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return render_template('hello.html')\n\"\"\"\n \n@app.route('/')\ndef welcome():\n return render_template('ImageUpload.html') # render a template\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "sub_path": "Image Upload(Joon)/imageupload(Original).py", "file_name": "imageupload(Original).py", "file_ext": "py", "file_size_in_byte": 1460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 14, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "522612558", "text": "# -*- encoding: utf-8 -*-\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom opencart.items import OpencartItem\nfrom scrapy.http import Request\nimport re\nimport datetime\nimport MySQLdb\n\ndef image_name_cleaner(image_url):\n if image_url == 'http://avtoto.com.ua/skin/images/placeholder.png':\n image_name = 'placeholder.png'\n else:\n image_name = image_url.split(\"/\")\n image_name = image_name[-1]\n return image_name\n\n\nclass OpencartSpider(CrawlSpider):\n name = \"main\"\n allowed_domains = ['avtoto.com.ua']\n start_urls = ['http://avtoto.com.ua/komplekty/komplekt-tormozov/']\n\n rules = (\n Rule(SgmlLinkExtractor(\\\n restrict_xpaths=\"//a[@class='btn btngrey next']\"),\\\n callback='parse_start_url',\\\n follow=True),)\n\n def parse_start_url(self, response):\n prod_url_list = response.xpath(\"//div[@class='product-name']/h2/a/@href\").extract()\n for url in prod_url_list:\n yield Request(url, callback=self.parse_product)\n\n def parse_product(self, response):\n item = OpencartItem()\n\n item['model'] = \"Prod_\"\n item['quantity'] = 99\n item['stock_status_id'] = 3\n item['price'] = '148300'\n item['minimum'] = 1\n item['status'] = 1\n item['date_added'] = datetime.date.today()\n item['date_modified'] = datetime.date.today()\n item['viewed'] = 1\n\n item['sku'] = \"\"\n item['upc'] = \"\"\n item['ean'] = \"\" \n item['jan'] = \"\"\n item['isbn'] = \"\"\n item['mpn'] = \"\"\n item['location'] = \"\" \n item['manufacturer_id'] = \"1\"\n item['tax_class_id'] = \"1\"\n item['date_available'] = \"2016-01-01\"\n\n item['name'] =response.xpath('//h1/text()').extract()[0]\n item['language_id'] = 1\n item['description'] = 'Длинное описание товара'\n item['meta_description'] = 'МЕТА описание'\n item['meta_keyword'] = 'МЕТА ключевые слова'\n item['seo_title'] = 'Тайтл'\n item['seo_h1'] = 'H1'\n item['tag'] = '1'\n\n item['main_category'] = 1\n\n # images block\n\n image_names_list = []\n image_url_list = []\n try:\n first_image_name = image_name_cleaner(response.xpath(\"//div[@class='product-image']/img/@src\").extract()[0])\n except IndexError:\n first_image_name = image_name_cleaner(response.xpath(\"//div[@class='product-image']/a/img/@src\").extract()[0])\n image_names_list.append(first_image_name)\n try:\n first_image_url = response.xpath(\"//div[@class='product-image']/img/@src\").extract()[0]\n except IndexError:\n first_image_url = response.xpath(\"//div[@class='product-image']/a/img/@src\").extract()[0]\n image_url_list.append(first_image_url)\n image_name = image_names_list[0]\n item['images'] = zip(image_url_list, image_names_list)\n item['image'] = (\"data/tovar/\" + image_name)\n item['sort_order'] = 0\n item['category_id'] = 61 \n return item", "sub_path": "opencart/spiders/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3162, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", 
"api": [{"api_name": "scrapy.contrib.spiders.CrawlSpider", "line_number": 19, "usage_type": "name"}, {"api_name": "scrapy.contrib.spiders.Rule", "line_number": 25, "usage_type": "call"}, {"api_name": "scrapy.contrib.linkextractors.sgml.SgmlLinkExtractor", "line_number": 25, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 33, "usage_type": "call"}, {"api_name": "opencart.items.OpencartItem", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 44, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "357761965", "text": "from django.test import TestCase, Client\nfrom django.urls import reverse\nfrom comment.models import Comment\nfrom posts.models import Posts\nimport json\n\n\nclass TestViews(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.postscmadd_url = reverse('posts_cm_add', args=[1])\n self.commentdel_url = reverse('comments_del', args=[2])\n self.comment1 = Comment.objects.create(\n name=\"Ryan\",\n email=\"email@email.com\",\n cm='Comment',\n posts_id=1,\n date=1,\n time=1\n )\n\n\n def test_commentadd_POST(self):\n \n response = self.client.post(self.postscmadd_url, {\n 'name': \"Ryan\",\n 'email': \"email@email.com\",\n 'cm': 'Comment',\n 'posts_id': 1,\n 'date':1,\n 'time':1\n })\n\n self.assertEquals(response.status_code, 200)\n self.assertEquals(self.comment1.name, 'Ryan')\n\n\n def test_commentdel_DELETE(self):\n \n Comment.objects.create(\n name=\"Ryan\",\n email=\"email@email.com\",\n cm=\"Comment\",\n posts_id=1,\n date=1,\n time=1\n )\n\n response = self.client.delete(self.commentdel_url, json.dumps({\n 'name': \"Ryan\",\n 'email': \"email@email.com\",\n 'cm': 'Comment',\n 'posts_id': 1,\n 'date': 1,\n 'time': 1\n }))\n\n self.assertEquals(response.status_code, 302)\n self.assertEquals(self.comment1.name, 'Ryan')\n\n\n ", "sub_path": "rdpproject/comment/tests/test_views.py", "file_name": "test_views.py", "file_ext": "py", "file_size_in_byte": 1568, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "django.test.Client", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 13, "usage_type": "call"}, {"api_name": "comment.models.Comment.objects.create", "line_number": 14, "usage_type": "call"}, {"api_name": "comment.models.Comment.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "comment.models.Comment", "line_number": 14, "usage_type": "name"}, {"api_name": "comment.models.Comment.objects.create", "line_number": 41, "usage_type": "call"}, {"api_name": "comment.models.Comment.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "comment.models.Comment", "line_number": 41, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "255618331", "text": "import os\nimport re\nfrom subprocess import Popen, PIPE, STDOUT\n\nfrom django.conf import settings\n\ndef get_revision():\n\ttry:\n\t\tcommand = ['svnversion',settings.PROJECT_ROOT]\n\t\tstIO = Popen(command, stdout=PIPE, stderr=STDOUT)\n\t\tstIO.wait()\n\t\toutS = 
stIO.stdout.read().strip()\n\t\tm = re.match(':?(\\d*).*[MS]?', outS)\n\t\treturn m.group(1) or ' '\n\texcept:\n\t\treturn 'Versioning Unavailable'\n\nREVISION = get_revision()\n", "sub_path": "svnrevision/templatetags/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 411, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.conf.settings.PROJECT_ROOT", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 9, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 10, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 10, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 10, "usage_type": "name"}, {"api_name": "re.match", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "280406314", "text": "import io\nimport re\nimport random\n\nimport discord\nfrom discord.ext import commands\nfrom PIL import Image, ImageOps, ImageFilter\n\n\nclass Images:\n \"\"\"Image related commands\"\"\"\n\n @commands.command()\n async def invert(self, ctx, *, member: discord.Member = None):\n \"\"\"Inverts an image.\"\"\"\n\n member = member or ctx.author\n\n async with ctx.session.get(member.avatar_url_as(format='png')) as r:\n with io.BytesIO(await r.read()) as f:\n file = await ctx.bot.loop.run_in_executor(None, self.invert_image, f)\n\n await ctx.send(file=discord.File(file, f'inverted.png'))\n\n def invert_image(self, file):\n with Image.open(file) as image:\n if image.mode == 'RGBA':\n r, g, b, a = image.split()\n r, g, b = map(lambda image: image.point(lambda p: 255 - p), (r, g, b))\n inverted_image = Image.merge(image.mode, (r, g, b, a))\n else:\n inverted_image = ImageOps.invert(image)\n\n f = io.BytesIO()\n inverted_image.save(f, format='png')\n f.seek(0)\n\n return f\n\n @commands.command()\n async def needsmorejpeg(self, ctx, *, member: discord.Member = None):\n \"\"\"Lowers the quality of an image to its minimum.\"\"\"\n\n member = member or ctx.author\n\n async with ctx.session.get(member.avatar_url_as(format='png')) as r:\n with Image.open(io.BytesIO(await r.read())) as image:\n file = io.BytesIO()\n image.save(file, format='jpeg', quality=1)\n file.seek(0)\n\n await ctx.send(file=discord.File(file, 'needsmore.jpeg'))\n\n @commands.command()\n async def edge(self, ctx, *, member: discord.Member = None):\n member = member or ctx.author\n\n async with ctx.session.get(member.avatar_url_as(format='png')) as r:\n with io.BytesIO(await r.read()) as f:\n file = await ctx.bot.loop.run_in_executor(None, self.make_edge, f)\n\n await ctx.send(file=discord.File(file, 'edge.png'))\n\n def make_edge(self, file):\n with Image.open(file).convert('RGB') as image:\n horizontal = image.filter(ImageFilter.Kernel((3, 3), [-1, 0, 1, -1, 0, 1, -1, 0, 1], 1.0))\n vertical = image.filter(ImageFilter.Kernel((3, 3), [-1, -1, -1, 0, 0, 0, 1, 1, 1], 1.0))\n modified = Image.blend(horizontal, vertical, 0.5)\n\n f = io.BytesIO()\n modified.save(f, format='png')\n f.seek(0)\n\n return f\n\n @commands.command()\n async def retro(self, ctx, line_1: str, line_2: str = '', *, line_3: str = ''):\n if not re.fullmatch(r'[A-Za-z0-9 ]+', line_1):\n return await ctx.send('First line only supports alphanumerical characters.')\n\n data = {\n 'bcg': random.randint(1, 5),\n 'txt': random.randint(1, 4),\n 'text1': line_1,\n 'text2': line_2,\n 'text3': line_3,\n }\n\n async with 
ctx.session.post('https://photofunia.com/effects/retro-wave', data=data) as r:\n txt = await r.text()\n\n link = re.search(r'(https?.+?\\.jpg\\?download)', txt)\n async with ctx.session.get(link.group(1)) as r:\n await ctx.send(file=discord.File(io.BytesIO(await r.read()), 'retro.jpg'))\n\n @commands.command()\n async def moom(self, ctx, *, member: discord.Member = None):\n member = member or ctx.author\n\n await self.mirror(ctx, member.avatar_url_as(format='png'))\n\n @commands.command()\n async def dood(self, ctx, *, member: discord.Member = None):\n member = member or ctx.author\n\n await self.mirror(ctx, member.avatar_url_as(format='png'))\n\n async def mirror(self, ctx, link):\n async with ctx.session.get(link) as r:\n with io.BytesIO(await r.read()) as f:\n file = await ctx.bot.loop.run_in_executor(None, self.mirror_image, f, ctx.command.name)\n\n await ctx.send(file=discord.File(file, f'{ctx.command.name}.png'))\n\n def mirror_image(self, file, command_name):\n with Image.open(file) as image:\n width, height = image.size\n\n if command_name == 'dood':\n left = image.crop((0, 0, width / 2, height))\n else:\n left = ImageOps.mirror(image.crop((width / 2, 0, width, height)))\n right = ImageOps.mirror(left)\n final = Image.new('RGB', image.size)\n final.paste(left)\n final.paste(right, (int(width / 2), 0))\n\n f = io.BytesIO()\n final.save(f, format='png')\n f.seek(0)\n\n return f\n\n\ndef setup(bot):\n bot.add_cog(Images())\n", "sub_path": "cogs/images.py", "file_name": "images.py", "file_ext": "py", "file_size_in_byte": 4722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "discord.Member", "line_number": 14, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 20, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 23, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "PIL.Image.merge", "line_number": 30, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 30, "usage_type": "name"}, {"api_name": "PIL.ImageOps.invert", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 32, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 34, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 41, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 47, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 47, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 48, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 52, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 40, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 40, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 55, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 59, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 62, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 54, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 54, "usage_type": "name"}, {"api_name": 
"PIL.Image.open", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 65, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.Kernel", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 66, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.Kernel", "line_number": 67, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 67, "usage_type": "name"}, {"api_name": "PIL.Image.blend", "line_number": 68, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 68, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 70, "usage_type": "call"}, {"api_name": "re.fullmatch", "line_number": 78, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 82, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 83, "usage_type": "call"}, {"api_name": "re.search", "line_number": 92, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 94, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 94, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 76, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 76, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 97, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 96, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 96, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 103, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 102, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 102, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 110, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 113, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 116, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 116, "usage_type": "name"}, {"api_name": "PIL.ImageOps.mirror", "line_number": 122, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 122, "usage_type": "name"}, {"api_name": "PIL.ImageOps.mirror", "line_number": 123, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 123, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 124, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 124, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "243005886", "text": "import configparser\nfrom abc import ABC\nfrom typing import AnyStr, List, Union, Any\n\n\ndef _type_cast(variable: AnyStr) -> Union[AnyStr, int, float, bool]:\n variable = variable.strip()\n if variable.lower() == 'true':\n return True\n if variable.lower() == 'false':\n return False\n try:\n return int(variable)\n except ValueError:\n try:\n return float(variable)\n except ValueError:\n return variable\n\n\nclass ConfigModule(ABC):\n def __init__(self, config_files: Union[AnyStr, List[AnyStr]] = None):\n self._config = configparser.ConfigParser()\n self._config.read(config_files or [])\n # NOTE: configure文件中的变量并不区分大小写\n\n def read_config(self,\n section: AnyStr,\n field: AnyStr = None,\n fallback: Any = None\n ) -> Any:\n if field is None:\n section, field = 'default', section\n if section.lower() != 'default' and fallback is None:\n if 'default' in self._config and field in self._config['default']:\n fallback = _type_cast(self._config.get('default', field))\n if section 
not in self._config or field not in self._config[section]:\n            return fallback\n        return _type_cast(self._config.get(section, field))\n", "sub_path": "test_parallel/utils/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1358, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "typing.AnyStr", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 6, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.AnyStr", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 23, "usage_type": "call"}, {"api_name": "typing.AnyStr", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.AnyStr", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "599887821", "text": "\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport re\n\n\nurl='https://leetcode.com/playground/VdgKnqNb/shared'\nres=requests.get(url)\nprint(res.text)\nprint(res.status_code)\n\nsp=bs(res.text,'lxml')\nprett=sp.prettify()\nprint(prett)\n\nfp=open('code.txt','w')\nfp.writelines(prett)\nfp.close()", "sub_path": "python/za/ariticle/123.py", "file_name": "123.py", "file_ext": "py", "file_size_in_byte": 293, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "450751814", "text": "#!/usr/bin/env python3\nimport math\nimport client as ct\nimport ast\nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom hierarchy_pos import hierarchy_pos\nimport time\n\n\nclass State:\n    \"\"\"Represents a node in the minimax decision tree and a possible state of the game at a given (hypothetical)\n    time\"\"\"\n\n    instances = 0\n\n    action_offset = {\n        \"stay\": (0, 0),\n        \"north\": (0, -1),\n        \"south\": (0, 1),\n        \"east\": (1, 0),\n        \"west\": (-1, 0)\n    }\n\n    goal_positions = None\n    max_rounds = None\n    obstacle_matrix = None\n\n    @classmethod\n    def set_obstacle_matrix(cls, obstacle_matrix):\n        \"\"\"Defines the obstacle matrix, where at indexes [x][y] 1 means there is an obstacle at position (x,y),\n        0 otherwise. 
Also defines the dimensions of the board\"\"\"\n cls.obstacle_matrix = obstacle_matrix\n cls.columns = len(obstacle_matrix)\n cls.rows = len(obstacle_matrix[0])\n\n @classmethod\n def set_goal_positions(cls, goal_positions):\n \"\"\"Defines the goal positions for the round that will be examined by the algorithm\"\"\"\n cls.goal_positions = goal_positions\n\n @classmethod\n def set_max_rounds(cls, max_rounds):\n \"\"\"Defines the duration of the game, which is also the depth of the search tree\"\"\"\n cls.max_rounds = max_rounds\n\n def __init__(self, is_max_turn, min_pos, max_pos, previous_round, graph=None, previous_name=None, action=None):\n \"\"\"Defines the state's attributes and, if visualization is enabled (work in progress), adds a corresponding node\n and edge to the tree graph.\"\"\"\n\n self.is_max_turn = is_max_turn\n self.min_pos = min_pos\n self.max_pos = max_pos\n self.round = previous_round + (1 if is_max_turn else 0)\n self.graph = graph\n self.name = \"root\"\n self.action = action\n\n # TODO: fix graph issues\n if self.graph is not None and (previous_name is not None):\n self.name = previous_name + str(\"Max\" if self.is_max_turn else \"Min\") + \\\n str(self.max_pos if self.is_max_turn else self.min_pos)\n graph.add_node(self.name) # NetworkX\n graph.add_edge(previous_name, self.name, action=action) # NetworkX\n\n State.instances += 1\n\n def result(self, action):\n \"\"\"Defines the state that results from doing a certain action in the state.\n In other words, this function is the transition model.\n\n Parameters:\n action (string): action description string\n\n Returns:\n (State): the resulting state\n \"\"\"\n if self.is_max_turn:\n new_max_pos = (self.max_pos[0] + self.action_offset[action][0],\n self.max_pos[1] + self.action_offset[action][1])\n new_min_pos = self.min_pos\n else:\n new_min_pos = (self.min_pos[0] + self.action_offset[action][0],\n self.min_pos[1] + self.action_offset[action][1])\n new_max_pos = self.max_pos\n return State(not self.is_max_turn, new_min_pos, new_max_pos, self.round,\n graph=self.graph, previous_name=self.name, action=action)\n\n def utility(self):\n \"\"\"Utility function (or payoff function). Defines the final numeric value for the game that ends in the state.\n Since the state knows which player is next and the game is a two-player game, there is no need to pass the\n player as an argument.\n\n Returns:\n (int) 0 if the minimizing player wins, 1 if the maximizing player wins\n \"\"\"\n u = 0 if self.min_pos in self.goal_positions else 1\n if self.graph is not None:\n self.graph.nodes[self.name][\"value\"] = u\n return u\n\n def is_terminal(self):\n \"\"\"Checks whether or not the game is over. 
Returns True if so, False otherwise.\n        In other words, checks whether this state is a terminal state.\n\n        Returns:\n            (bool): whether or not the state is a terminal state.\n        \"\"\"\n        return self.min_pos in State.goal_positions or self.round >= State.max_rounds\n\n    def is_legal(self, action):\n        \"\"\"Checks whether or not the next player can perform a certain action.\n\n        Parameters:\n            action (str): the action description string\n\n        Returns:\n            (bool): whether or not the next player can perform the given action\n        \"\"\"\n        player_pos = self.max_pos if self.is_max_turn else self.min_pos\n        other_pos = self.min_pos if self.is_max_turn else self.max_pos\n        offset = self.action_offset[action]\n        new_x = (player_pos[0] + offset[0]) % self.columns\n        new_y = (player_pos[1] + offset[1]) % self.rows\n        return all((\n            State.obstacle_matrix[new_x][new_y] == 0,\n            (new_x, new_y) != other_pos,\n            (not self.is_max_turn or (self.is_max_turn and (new_x, new_y) not in State.goal_positions))))\n\n    @staticmethod\n    def manhattan_distance(pos1, pos2):\n        \"\"\" The Manhattan distance between two positions\n\n        Parameters:\n            pos1 (tuple or list): a position\n            pos2 (tuple or list): another position\n\n        Returns:\n            (int or float): the Manhattan distance between pos1 and pos2\n        \"\"\"\n        return abs(pos2[0] - pos1[0]) + abs(pos2[1] - pos1[1])\n\n    def killer_moves(self, action):\n        \"\"\"Heuristic ordering function for the actions, based on the Manhattan distance to the goal or related tiles.\n\n        Parameters:\n            action (str): the action to rank\n\n        Returns:\n            (int): if it's the maximizing player's turn, returns the Manhattan distance from the position resulting\n                from the action to the goal-adjacent tile that is closest to the minimizing player. If it's the\n                minimizing player's turn, returns the Manhattan distance from the position that results from the action to\n                the goal.\n        \"\"\"\n        closest_goal = min(self.goal_positions, key=lambda x: self.manhattan_distance(x, self.min_pos))\n        if not self.is_max_turn:\n            hypothetical_pos = (self.max_pos[0] + self.action_offset[action][0],\n                                self.max_pos[1] + self.action_offset[action][1])\n            dx, dy = closest_goal[0]-self.min_pos[0], closest_goal[1]-self.min_pos[1]\n            if dx >= 0 and dx >= abs(dy):\n                return self.manhattan_distance(hypothetical_pos, (closest_goal[0] + 1, closest_goal[1]))\n            if dx <= 0 and dx >= abs(dy):\n                return self.manhattan_distance(hypothetical_pos, (closest_goal[0] - 1, closest_goal[1]))\n            if dy >= 0 and dx < abs(dy):\n                return self.manhattan_distance(hypothetical_pos, (closest_goal[0], closest_goal[1] + 1))\n            if dy <= 0 and dx < abs(dy):\n                return self.manhattan_distance(hypothetical_pos, (closest_goal[0], closest_goal[1] - 1))\n        else:\n            hypothetical_pos = (self.min_pos[0] + self.action_offset[action][0],\n                                self.min_pos[1] + self.action_offset[action][1])\n            return self.manhattan_distance(hypothetical_pos, closest_goal)\n\n\n    def actions(self):\n        \"\"\"Returns all actions that the next player is allowed to perform in its turn.\n\n        Returns:\n            (list) list of strings representing all legal actions (\"north\", \"south\", \"east\", \"west\", \"stay\"),\n                ordered by the killer-moves heuristic\n        \"\"\"\n        return sorted([action for action in State.action_offset if self.is_legal(action)], key=self.killer_moves)\n        #return [action for action in State.action_offset if self.is_legal(action)]\n\n    def max_value(self, alpha, beta, action):\n        \"\"\"Explores, in a tree-like fashion, the outcomes of all possible actions in the state from the perspective of\n        the maximizing player, without ever exploring the outcomes 
that could have no influence on the final decision.\n\n        Parameters:\n            alpha (int): the value of the best choice found so far in the path for the maximizing player\n            beta (int): the value of the best choice found so far in the path for the minimizing player\n            action (str): the action that resulted in the state that the function is given\n\n        Returns:\n            (str): the action that resulted in the value to assign to this state\n            (int): the value to assign to this state, according to the minimax algorithm, from the perspective of\n                the maximizing player.\n        \"\"\"\n\n        if self.is_terminal():\n            return action, self.utility()\n\n        value = -1000\n        for a in self.actions():\n            action, value = max((action, value),\n                                (a, self.result(a).min_value(alpha, beta, a)[1]),\n                                key=lambda x: x[1])\n            if value >= beta:  # beta cut-off: the minimizing player would never allow this branch\n                return action, value\n            alpha = max(alpha, value)\n\n        if self.graph is not None:\n            self.graph.nodes[self.name][\"value\"] = value\n\n        return action, value\n\n    def min_value(self, alpha, beta, action):\n        \"\"\"Explores, in a tree-like fashion, the outcomes of all possible actions in the state from the perspective of\n        the minimizing player, without ever exploring the outcomes that could have no influence on the final decision.\n\n        Parameters:\n            alpha (int): the value of the best choice found so far in the path for the maximizing player\n            beta (int): the value of the best choice found so far in the path for the minimizing player\n            action (str): the action that resulted in the state that the function is given\n\n        Returns:\n            (str): the action that resulted in the value to assign to this state\n            (int): the value to assign to this state, according to the minimax algorithm, from the perspective of the\n                minimizing player.\n        \"\"\"\n\n        if self.is_terminal():\n            return action, self.utility()\n\n        value = 1000\n        for a in self.actions():\n            action, value = min((action, value),\n                                (a, self.result(a).max_value(alpha, beta, a)[1]),\n                                key=lambda x: x[1])\n            if value <= alpha:  # alpha cut-off: the maximizing player would never allow this branch\n                return action, value\n            beta = min(beta, value)\n\n        if self.graph is not None:\n            self.graph.nodes[self.name][\"value\"] = value\n        return action, value\n\n\nclass Agent:\n    \"\"\"Describes an adversarial agent\"\"\"\n\n    def __init__(self):\n        \"\"\"Simply initializes the agent\"\"\"\n        self.current_state = None\n\n    def set_state(self, state_description):\n        \"\"\"Defines the current state of the game from a state description dictionary provided by the Agent1 server\n\n        Parameters:\n            state_description (dict): the state description dictionary\n\n        \"\"\"\n        self.current_state = State(state_description[\"agent_id\"] == 0,\n                                   state_description[\"agents\"][0],\n                                   state_description[\"agents\"][1],\n                                   state_description[\"round\"],\n                                   None,  # nx.Graph(),\n                                   None,\n                                   \"root\")\n\n    def alpha_beta_search(self):\n        \"\"\"Returns the action description string that corresponds to the best action the agent can execute, that is, to\n        the action that leads to the outcome with the best utility for the agent, assuming the adversary wants to\n        minimize it.\n        This search is optimized using a technique called alpha-beta pruning, a technique that prevents the minimax\n        algorithm from exploring outcomes that have no possible influence on the final decision\n\n        Returns:\n            (str): the action description string.\n        \"\"\"\n        a, v = self.current_state.max_value(-1000, 1000, \"stay\")\n\n        if self.current_state.graph is not None:\n            labels = nx.get_node_attributes(self.current_state.graph, \"value\")\n            edge_labels = nx.get_edge_attributes(self.current_state.graph, \"action\")\n            # print(edge_labels)\n            
plt.figure(figsize=(20, 20))\n\n            pos = hierarchy_pos(self.current_state.graph, \"root\", width=2 * math.pi, xcenter=0)\n            new_pos = {u: (r * math.cos(theta), r * math.sin(theta)) for u, (theta, r) in pos.items()}\n\n            nx.draw(self.current_state.graph, new_pos, node_size=20, alpha=0.5, node_color=\"blue\", labels=labels)\n            nx.draw_networkx_edge_labels(self.current_state.graph, new_pos, edge_labels=edge_labels)\n            plt.axis(\"equal\")\n            plt.show()\n\n        return a\n\ndef parse_last_dict(bad_string):\n    \"\"\"Returns the string that corresponds to the last open and closed curly brackets.\n    This is necessary due to the way the server/client interaction works, so as to identify the last server response.\n    Ideally, this would be unnecessary, but this had to be implemented due to time restrictions that prevented the group\n    from exploring the software.\n\n    Parameters:\n        bad_string (str): the string to parse\n\n    Returns:\n        (str): the resulting good string\n\n    \"\"\"\n    return bad_string[bad_string.rindex(\"{\"):]\n\n\ndef main(rounds):\n    \"\"\"Game loop. Creates two clients and cycles between them.\n    The first client is the minimizing player, or the human,\n    and so the program waits for user input and sends the corresponding action value pair to the server.\n    The second\n    client is the maximizing player, and so the program calculates the best possible decision based on the minimax algorithm and sends\n    the corresponding action value pair to the server.\n\n    Parameters:\n        rounds (int): the number of game rounds\n\n    Returns:\n        None\n    \"\"\"\n    client_min = ct.Client('127.0.0.1', 50000)\n    client_max = ct.Client('127.0.0.1', 50000)\n    res_min = client_min.connect()\n    res_max = client_max.connect()\n    if all(res != -1 for res in (res_min, res_max)):\n\n        agent = Agent()\n        State.set_max_rounds(rounds)\n\n        while True:\n            command, action = input(\"Min > \").split(\" \")\n            client_min.execute(command, action)\n\n            state = ast.literal_eval(parse_last_dict(client_max.receiveData()))\n            State.set_goal_positions(state[\"goals\"])\n            State.set_obstacle_matrix(state[\"obstacles\"])\n            agent.set_state(state)\n            start = time.perf_counter()\n            action = agent.alpha_beta_search()\n            stop = time.perf_counter()\n            print(\"Max > command\", action)\n            print(\"Elapsed time:\", stop - start, \"Generated nodes:\", State.instances)\n            State.instances = 0\n            client_max.execute(\"command\", action)\n\n            if agent.current_state.result(action).is_terminal():\n                input(\"The game is over.\")\n                break\n\n\nif __name__ == \"__main__\":\n    main(rounds=5)\n", "sub_path": "client/alphabeta_oo.py", "file_name": "alphabeta_oo.py", "file_ext": "py", "file_size_in_byte": 15086, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "networkx.get_node_attributes", "line_number": 285, "usage_type": "call"}, {"api_name": "networkx.get_edge_attributes", "line_number": 286, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "hierarchy_pos.hierarchy_pos", "line_number": 290, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 290, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 291, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 291, "usage_type": "call"}, {"api_name": "networkx.draw", "line_number": 293, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_edge_labels", "line_number": 294, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot.axis", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "client.Client", "line_number": 330, "usage_type": "call"}, {"api_name": "client.Client", "line_number": 331, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 343, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 347, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 349, "usage_type": "call"}]} +{"seq_id": "260001802", "text": "#!/usr/bin/env python\nfrom __future__ import print_function, unicode_literals\nimport sys\nimport re\nimport codecs\n\nif sys.version_info.major < 3:\n # The Python 2 open doesn't have an encoding= parameter\n from io import open\n\n # From https://stackoverflow.com/questions/10569438/how-to-print-unicode-character-in-python\n UTF8Writer = codecs.getwriter('utf8')\n sys.stdout = UTF8Writer(sys.stdout)\n\n\ntag_re = re.compile(\n r\"^({{[\\w@]+}})$\" # ()Capture {{Tag}} word,\n r\"(.*?)\" # then ()capture characters until\n r\"^@@$\", # terminating \"@@\" line.\n flags=re.DOTALL | re.MULTILINE) # '.' includes '\\n' | '^' and '$' are per line.\n\n\ndef get_tagged_content(file_path):\n with open(file_path, mode=\"r\", encoding=\"utf8\") as content:\n tag_content = dict(re.findall(tag_re, ''.join(content.readlines())))\n return tag_content\n\n\ndef write_output(tagged_content, output_function):\n with open(\"template.html\", mode=\"r\", encoding=\"utf8\") as template:\n for line in template.readlines():\n output_function(tagged_content.get(line.strip(), line))\n\n\nif __name__ == \"__main__\":\n # print(\"processing {}\".format(sys.argv[1]), file=sys.stderr)\n def print_func(string):\n print(string, end=\"\")\n\n write_output(get_tagged_content(sys.argv[1]), print_func)\n", "sub_path": "render.py", "file_name": "render.py", "file_ext": "py", "file_size_in_byte": 1308, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.version_info", "line_number": 7, "usage_type": "attribute"}, {"api_name": "codecs.getwriter", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 13, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 20, "usage_type": "attribute"}, {"api_name": "re.MULTILINE", "line_number": 20, "usage_type": "attribute"}, {"api_name": "io.open", "line_number": 24, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 25, "usage_type": "call"}, {"api_name": "io.open", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 40, "usage_type": "attribute"}]} +{"seq_id": "124241010", "text": "from time import sleep\n\nfrom selenium.webdriver import ActionChains\n\nfrom day_test08.Base import Base\n\n\nclass TestAlter(Base):\n def test_alter(self):\n self.driver.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')\n self.driver.switch_to_frame(\"iframeResult\")\n drag=self.driver.find_element_by_id('draggable')\n drop=self.driver.find_element_by_id('droppable')\n action=ActionChains(self.driver)\n action.drag_and_drop(drag,drop).perform()\n sleep(3)\n print(\"点击 alter 确认\")\n self.driver.switch_to.alert.accept()\n 
self.driver.switch_to.default_content()\n        self.driver.find_element_by_id('submitBTN').click()\n\n        sleep(4)", "sub_path": "day_test08/test_alter.py", "file_name": "test_alter.py", "file_ext": "py", "file_size_in_byte": 728, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "day_test08.Base.Base", "line_number": 8, "usage_type": "name"}, {"api_name": "selenium.webdriver.ActionChains", "line_number": 14, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "407901495", "text": "import pygame\nimport time\nfrom Node import Node\nfrom copy import deepcopy\n\n\ndef draw_lines(screen, strikes):\n    for row in range(9):\n        if row != 0:\n            if row % 3 == 0:\n                pygame.draw.line(screen, (0, 0, 0), (0, row * 60), (540, row * 60), 5)\n            else:\n                pygame.draw.line(screen, (0, 0, 0), (0, row * 60), (540, row * 60))\n    for col in range(9):\n        if col != 0:\n            if col % 3 == 0:\n                pygame.draw.line(screen, (0, 0, 0), (col * 60, 0), (col * 60, 540), 5)\n            else:\n                pygame.draw.line(screen, (0, 0, 0), (col * 60, 0), (col * 60, 540))\n    pygame.draw.rect(screen, (255, 255, 255), (0, 540, 540, 60))\n    pygame.draw.line(screen, (0, 0, 0), (0, 540), (540, 540), 3)\n    for i in range(len(strikes)):\n        if strikes[i] == 1:\n            pygame.draw.line(screen, (255, 0, 0), (540/2-(i*-60+65), 560), (540/2+10-(i*-60+65), 580), 3)\n            pygame.draw.line(screen, (255, 0, 0), (540/2+10-(i*-60+65), 560), (540/2-(i*-60+65), 580), 3)\n\n\ndef find_clicked_node(grid, pos, clicked_grid, orig_board):\n    clicked = None\n    orig_clicked = None\n    for row in range(9):\n        for col in range(9):\n            n = grid[row][col]\n            if ((pos[0] < n.col < pos[0]+60) and (pos[1] < n.row < pos[1]+60)) and (orig_board[row][col] == 0):\n                clicked_grid[row][col] = True\n                clicked = row, col\n            else:\n                if clicked_grid[row][col]:\n                    orig_clicked = row, col\n                clicked_grid[row][col] = False\n    if (orig_clicked is not None) and (clicked is None):\n        clicked_grid[orig_clicked[0]][orig_clicked[1]] = True\n        return None\n    else:\n        return clicked\n\n\ndef update_grid(screen, new_node, board, myfont):\n    for row in range(9):\n        for col in range(9):\n            if row == new_node[0] and col == new_node[1]:\n                pygame.draw.rect(screen, (100, 100, 100), (col * 60, row * 60, 60, 60))\n            else:\n                pygame.draw.rect(screen, (255, 255, 255), (col * 60, row * 60, 60, 60))\n            if board[row][col] != 0:\n                textsurface = myfont.render(str(board[row][col]), True, (0, 0, 0))\n                screen.blit(textsurface, (col * 60 + 23, row * 60 + 15))\n\n\ndef strike(strikes):\n    # register one strike; return False once the last strike slot has been used\n    for i in range(len(strikes)):\n        if strikes[i] == 0:\n            strikes[i] = 1\n            if i == len(strikes)-1:\n                return False\n            return True\n    return False\n\n\ndef solve_soduku(sudoku, screen):\n    \"\"\"\n    Solves the given sudoku board\n    :param screen: the pygame surface the solving progress is drawn on\n    :param sudoku: the board that needs to be solved\n    :return: None\n    \"\"\"\n\n    myfont = pygame.font.SysFont('Times New Roman', 30)\n\n    # Work on the board held by the sudoku object (a reference, not a copy; the caller passes in a copy of the original board)\n    solved_board = sudoku.board\n\n    # Stores the index of the next number that should be tried (the index will be used with the possible_nums list)\n    try_new_nums = [[0] * 9 for y in range(9)]\n\n    # Creates a list that will act like a stack for the depth first search (stores tuples (row, col) for each unsolved square)\n    nodes = [sudoku.find_next_empty_node((0, -1))]\n\n    done = False\n\n    # Keeps running until the puzzle is either solved or runs out of possible 
combinations\n while len(nodes) != 0:\n\n time.sleep(.001)\n\n if not done:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n\n pygame.display.update()\n\n # finds all possible numbers that can go into the current unsolved square\n one = set(sudoku.check_vertically(nodes[len(nodes) - 1], solved_board))\n two = set(sudoku.check_horizontally(nodes[len(nodes) - 1], solved_board))\n three = set(sudoku.check_box(nodes[len(nodes) - 1], solved_board))\n possible_nums = list(one.intersection(two).intersection(three))\n\n # Determines if there is a number that can be put into the current unsolved square\n if len(possible_nums) > 0:\n\n # Stores the current number in the current unsolved square\n curr_num = solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]]\n\n # Stores the next number that will be tried in the current unsolved square\n possible_next_num = possible_nums[\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] % len(possible_nums)]\n\n # Makes sure that the code doesn't get stuck trying the same combos\n if try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] == len(possible_nums):\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Makes sure that the code doesn't get stuck on trying the same number\n if possible_next_num == curr_num:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Sets the unsolved square to the next number that is to be tried\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = possible_next_num\n\n # Changes which index will be used to find a different number if the new number does not work\n try_new_nums[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] += 1\n\n # if there are no possible numbers for the current square, it backtracks to the last number that can change\n else:\n solved_board[nodes[len(nodes) - 1][0]][nodes[len(nodes) - 1][1]] = 0\n nodes.pop()\n continue\n\n # Determines if there is still an empty unsolved square left\n if sudoku.has_next_emtpy_node(nodes[len(nodes) - 1]):\n nodes.append(sudoku.find_next_empty_node(nodes[len(nodes) - 1]))\n else:\n update_grid(screen, (nodes[len(nodes) - 1][0], nodes[len(nodes) - 1][1]), solved_board, myfont)\n draw_lines(screen, [0, 0, 0])\n done = True\n\n\nclass SudokuGraphics:\n\n def __init__(self, sudoku, s):\n pygame.init()\n\n pygame.font.init()\n\n myfont = pygame.font.SysFont('Times New Roman', 30)\n\n screen = pygame.display.set_mode((540, 600))\n\n orig_board = sudoku.board\n\n solve_board = deepcopy(orig_board)\n\n solved = s\n\n strikes = [0, 0, 0]\n\n grid = [[0 for i in range(9)] for j in range(9)]\n\n clicked_grid = [[False for i in range(9)] for j in range(9)]\n\n clicked = None\n\n curr_num = 0\n\n for row in range(9):\n for col in range(9):\n pygame.draw.rect(screen, (255, 255, 255), (col * 60, row * 60, 60, 60))\n if orig_board[row][col] != 0:\n textsurface = myfont.render(str(orig_board[row][col]), True, (0, 0, 0))\n screen.blit(textsurface, (col*60+23, row*60+15))\n grid[row][col] = Node(row*60+60, col*60+60)\n\n draw_lines(screen, strikes)\n\n run = True\n while run:\n pygame.time.delay(50)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_1:\n curr_num = 1\n elif event.key == pygame.K_2:\n 
curr_num = 2\n elif event.key == pygame.K_3:\n curr_num = 3\n elif event.key == pygame.K_4:\n curr_num = 4\n elif event.key == pygame.K_5:\n curr_num = 5\n elif event.key == pygame.K_6:\n curr_num = 6\n elif event.key == pygame.K_7:\n curr_num = 7\n elif event.key == pygame.K_8:\n curr_num = 8\n elif event.key == pygame.K_9:\n curr_num = 9\n elif event.key == pygame.K_0:\n curr_num = 0\n elif event.key == pygame.K_SPACE:\n sudoku.board = solve_board\n solve_soduku(sudoku, screen)\n if clicked is not None:\n if curr_num == solved[clicked[0]][clicked[1]]:\n solve_board[clicked[0]][clicked[1]] = curr_num\n update_grid(screen, clicked, solve_board, myfont)\n draw_lines(screen, strikes)\n else:\n run = strike(strikes)\n draw_lines(screen, strikes)\n\n if pygame.mouse.get_pressed()[0] == 1:\n clicked = find_clicked_node(grid, pygame.mouse.get_pos(), clicked_grid, orig_board)\n\n if clicked is not None and orig_board[clicked[0]][clicked[1]] == 0:\n update_grid(screen, clicked, solve_board, myfont)\n draw_lines(screen, strikes)\n\n pygame.display.update()\n\n pygame.quit()\n", "sub_path": "SudokuGraphics.py", "file_name": "SudokuGraphics.py", "file_ext": "py", "file_size_in_byte": 9350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pygame.draw.line", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 79, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 156, "usage_type": "call"}, {"api_name": "pygame.font.init", "line_number": 158, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 158, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 160, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 160, "usage_type": 
"attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 162, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 166, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 182, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 182, "usage_type": "attribute"}, {"api_name": "Node.Node", "line_number": 186, "usage_type": "call"}, {"api_name": "pygame.time.delay", "line_number": 192, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 192, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 194, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 195, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 197, "usage_type": "attribute"}, {"api_name": "pygame.K_1", "line_number": 198, "usage_type": "attribute"}, {"api_name": "pygame.K_2", "line_number": 200, "usage_type": "attribute"}, {"api_name": "pygame.K_3", "line_number": 202, "usage_type": "attribute"}, {"api_name": "pygame.K_4", "line_number": 204, "usage_type": "attribute"}, {"api_name": "pygame.K_5", "line_number": 206, "usage_type": "attribute"}, {"api_name": "pygame.K_6", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pygame.K_7", "line_number": 210, "usage_type": "attribute"}, {"api_name": "pygame.K_8", "line_number": 212, "usage_type": "attribute"}, {"api_name": "pygame.K_9", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pygame.K_0", "line_number": 216, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 218, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 230, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 230, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 231, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 231, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 237, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 237, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 239, "usage_type": "call"}]} +{"seq_id": "159066815", "text": "import pwn, tempfile, subprocess, errno, os\nimport pwn.internal.shellcode_helper as H\n\ndef _asm_real(arch, os, blocks, emit_asm, checked = True):\n if arch in ['i386', 'amd64']:\n return pwn.nasm.nasm(arch, os, blocks, emit_asm, checked)\n else:\n return pwn.gas.gas(arch, os, blocks, emit_asm, checked)\n\ndef asm(*blocks, **kwargs):\n blocks = H.AssemblerContainer(*blocks, os=kwargs.get('os'), arch=kwargs.get('arch'), cast = 'text')\n emit_asm = kwargs.get('emit_asm', False)\n\n if all(isinstance(b, H.AssemblerBlob) for b in blocks.blocks):\n data = pwn.flat(b.blob for b in blocks.blocks)\n if emit_asm:\n return 'The following blob was computed:\\n' + data.encode('hex')\n else:\n return data\n\n system = pwn.with_context(os = blocks.os, arch = blocks.arch)\n return _asm_real(system['arch'], system['os'], blocks, emit_asm, kwargs.get('checked', True))\n", "sub_path": "pwn/asm.py", "file_name": "asm.py", "file_ext": "py", "file_size_in_byte": 922, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pwn.nasm.nasm", "line_number": 6, "usage_type": "call"}, {"api_name": "pwn.nasm", "line_number": 6, "usage_type": 
"attribute"}, {"api_name": "pwn.gas.gas", "line_number": 8, "usage_type": "call"}, {"api_name": "pwn.gas", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pwn.internal.shellcode_helper.AssemblerContainer", "line_number": 11, "usage_type": "call"}, {"api_name": "pwn.internal.shellcode_helper", "line_number": 11, "usage_type": "name"}, {"api_name": "pwn.internal.shellcode_helper.AssemblerBlob", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pwn.internal.shellcode_helper", "line_number": 14, "usage_type": "name"}, {"api_name": "pwn.flat", "line_number": 15, "usage_type": "call"}, {"api_name": "pwn.with_context", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "229417612", "text": "# Import libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom datetime import date\nfrom sklearn.linear_model import LinearRegression as lm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import metrics\n\n# Read in the dataset\ndataset = pd.read_csv(r'../Melbourne_housing_FULL.csv', skip_blank_lines=True)\ndataset[\"Date\"] = pd.to_datetime(dataset[\"Date\"],dayfirst=True)\n\n# Initialize arrays\ndataset_dr = dataset.dropna().sort_values(\"Date\")\ndataset_dr = dataset_dr\nfull_Data = []\n\n#How many days since start\ndays_since_start = [(x - dataset_dr[\"Date\"].min()).days for x in dataset_dr[\"Date\"]]\ndataset_dr[\"Days\"] = days_since_start\n\nsuburb_dummies = pd.get_dummies(dataset_dr[[\"Type\", \"Method\"]])\n\nfull_Data = dataset_dr.drop([\"Address\", \"Price\", \"Date\", \"SellerG\", \"Suburb\", \"Type\", \"Method\", \"CouncilArea\", \"Regionname\"], axis=1).join(suburb_dummies)\n\nX = full_Data\ny = dataset_dr[\"Price\"]\n\n# Split into test data and training data\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n# Train the algorithm\nregressor = lm()\nregressor.fit(X_train, y_train)\nprint(\"Intercept: {}\" .format(regressor.intercept_))\ncoeff_df = pd.DataFrame(regressor.coef_,X.columns,columns=['Coefficient'])\nranked_suburbs = coeff_df.sort_values(\"Coefficient\", ascending = False)\nprint(ranked_suburbs)\n\n# Calculate linear predictions\ny_pred = regressor.predict(X_test)\n\n# Metrics\nprint('MSE:', metrics.mean_squared_error(y_test, y_pred))\nprint(\"MAE:\", metrics.mean_absolute_error(y_test, y_pred))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))\n\n# Plot\n\nplt.scatter(y_test, y_pred)\nplt.ylim([200000,1000000])\nplt.xlim([200000,1000000])\n\nsns.displot((y_test-y_pred), bins=50, kde=True)\nplt.show()\n", "sub_path": "TrainAndTest/regression.py", "file_name": "regression.py", "file_ext": "py", "file_size_in_byte": 1838, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 47, "usage_type": "name"}, 
{"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 48, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "seaborn.displot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "524133835", "text": "import numpy as np\n\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom alexnet import AlexNet\nfrom caffe_classes import class_names\nimport tensorflow as tf\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimagenet_mean=np.array([104.,117.,124.],dtype=np.float32)\ncurrent=os.getcwd()\nprint(current)\nimage_dir=os.path.join(current,'images')\nimg_files=[os.path.join(image_dir,f)for f in os.listdir(image_dir) if f.endswith('.jpeg')]\nimgs=[]\nfor f in img_files:\n imgs.append(cv2.imread(f))\nfig=plt.figure(figsize=(15,6))\n# for i,img in enumerate(imgs):\n# fig.add_subplot(1,3,i+1)\n# # plt.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2GRB))\n# plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n# plt.axis('off')\n# plt.show()\n\nx=tf.placeholder(tf.float32,[1,227,227,3])\nkeep_prob=tf.placeholder(tf.float32)\nmodel=AlexNet(x,keep_prob,1000,[])\nscore=model.fc8\nsoftmax=tf.nn.softmax(score)\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.5\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.allocator_type = 'BFC'\nwith tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n model.load_initial_weights(sess)\n fig2=plt.figure(figsize=(15,6))\n for i,image in enumerate(imgs):\n # cv2.imshow(str(i),image)\n img=cv2.resize(image.astype(np.float32),(227,227))\n img-=imagenet_mean\n img=img.reshape((1,227,227,3))\n probs=sess.run(softmax,feed_dict={x:img,keep_prob:1})\n class_name=class_names[np.argmax(probs)]\n maxVal=probs.max()\n cv2.imwrite('./output/'+class_name+'_'+str(maxVal)+'.jpeg',image)", "sub_path": "val.py", "file_name": "val.py", "file_ext": "py", "file_size_in_byte": 1622, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 15, 
"usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "alexnet.AlexNet", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "cv2.resize", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 42, "usage_type": "attribute"}, {"api_name": "caffe_classes.class_names", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "46830610", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 3 10:57:50 2019\n\n@author: ngritti\n\"\"\"\nfrom PyQt5.QtCore import Qt, QUrl\nfrom PyQt5.QtWidgets import (QApplication, QComboBox, QVBoxLayout, QDialog,\n QGridLayout, QGroupBox, QLabel, QLineEdit, QPushButton,\n QFileDialog, QMessageBox, QTabWidget, QWidget,\n QTableWidget, QTableWidgetItem, QSpinBox, QDoubleSpinBox,QCheckBox,\n QSplitter, QTreeView, QListView, QFileSystemModel, QAbstractItemView)\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport numpy as np\nimport sys, warnings, os, time\nfrom skimage.io import imread, imsave\nimport scipy.ndimage as ndi\nfrom collections.abc import Iterable\n\nfrom morgana.GUIs import manualmask\nfrom morgana.GUIs import inspection\nfrom morgana.GUIs import visualize0d\nfrom morgana.GUIs import visualize1d\nfrom morgana.GUIs import visualize2d\nfrom morgana.MLModel import io as ioML\nfrom morgana.MLModel import train\nfrom morgana.MLModel import predict\nfrom morgana.MLModel import overview as overviewML\nfrom morgana.DatasetTools import io as ioDT\nfrom morgana.DatasetTools.morphology import overview as overviewDT\nfrom morgana.DatasetTools import arrangemorphodata\nfrom morgana.DatasetTools import arrangefluodata\nfrom morgana.ImageTools.objectsparsing import objectsparser\nwarnings.filterwarnings(\"ignore\")\n\n\nclass morganaApp(QWidget):\n def __init__(self, parent=None):\n super(morganaApp, self).__init__(parent)\n\n self.modelFolder = '-'\n self.imageFolder = '-'\n self.imageImportFolder = '-'\n self.maskFolder = '-'\n self.classifier = None\n self.scaler = None\n self.params = { 'sigmas': [1,2,5,15],\n 'down_shape': 0.25,\n 'edge_size': 2,\n 'fraction': 0.5,\n 'bias': 0.5,\n 'feature_mode': 'ilastik' }\n\n tabs = QTabWidget()\n self.maskTab = self.createMaskTab()\n tabs.addTab(self.maskTab,'Generate or Import Masks')\n\n 
self.quantificationTab = self.createQuantificationTab()\n tabs.addTab(self.quantificationTab,'Quantification')\n\n ### defined handler for subwindows\n self.inspector = None\n self.quantifier = []\n\n ####################################################################################################\n '''\n TESTS WITHOUT CLICKING\n '''\n\n ####################################################################################################\n\n self.layout = QVBoxLayout(self)\n self.layout.addWidget(tabs)\n self.setLayout(self.layout)\n\n self.setWindowTitle('Organoids Segmentation App')\n QApplication.setStyle('Fusion')\n\n\n\n '''\n MASK TAB\n '''\n def createMaskTab(self): \n mainTab = QWidget()\n self.createModelGroup()\n self.createImportGroup()\n \n self.isMask = QCheckBox(\"Import external masks\")\n self.isMask.toggle()\n self.isMask.stateChanged.connect(self.changeMaskGroup)\n self.isMask.setChecked(False)\n \n mainTabLayout = QVBoxLayout() \n mainTabLayout.addWidget(self.isMask)\n mainTabLayout.addWidget(self.modelGroup)\n mainTabLayout.addWidget(self.importGroup)\n mainTab.setLayout(mainTabLayout)\n return mainTab\n \n def changeMaskGroup(self):\n if self.isMask.isChecked():\n self.modelGroup.hide()\n self.importGroup.show()\n else:\n self.importGroup.hide()\n self.modelGroup.show()\n\n \n '''\n Generating model and generation of masks\n '''\n def createModelGroup(self):\n self.modelGroup = QGroupBox(\"\")\n\n \n ########## create buttons for model definition group ##############\n self.modelDefGroup = QGroupBox(\"Machine Learning model definition\")\n\n selectModel = QPushButton(\"Specify model folder\")\n selectModel.setFocusPolicy(Qt.NoFocus)\n selectModel.clicked.connect( self.selectModelFolder )\n self.modelFolderSpace = QLineEdit(); self.modelFolderSpace.setText(self.modelFolder)\n self.modelFolderSpace.setReadOnly(True)\n self.modelFolderSpace.setStyleSheet('color:gray;')\n self.deepModel = QCheckBox(\"Use Multi Layer Perceptrons\")\n self.deepModel.setChecked(False)\n\n self.showMoreButton = QPushButton(\"Show/Hide params\")\n self.showMoreButton.setFocusPolicy(Qt.NoFocus)\n self.showMoreButton.clicked.connect(self.show_hide)\n\n self.sigmasLabel = QLabel('Sigmas:')\n self.sigmasSpace = QLineEdit(); self.sigmasSpace.setText(\"-\")\n self.sigmasSpace.setEnabled(False)\n self.down_shapeLabel = QLabel('Downscaling:')\n self.down_shapeSpace = QDoubleSpinBox(); self.down_shapeSpace.setSpecialValueText(\"-\")\n self.down_shapeSpace.setMinimum(-1); self.down_shapeSpace.setMaximum(1); self.down_shapeSpace.setSingleStep(.01);\n self.down_shapeSpace.setEnabled(False)\n self.edge_sizeLabel = QLabel('Edge size:')\n self.edge_sizeSpace = QSpinBox(); self.edge_sizeSpace.setSpecialValueText(\"-\")\n self.edge_sizeSpace.setMinimum(0);\n self.edge_sizeSpace.setEnabled(False)\n self.fractionLabel = QLabel('Pixel% extraction:')\n self.fractionSpace = QDoubleSpinBox(); self.fractionSpace.setSpecialValueText(\"-\")\n self.fractionSpace.setMinimum(0); self.fractionSpace.setMaximum(1); self.fractionSpace.setSingleStep(.1);\n self.fractionSpace.setEnabled(False)\n self.biasLabel = QLabel('Extraction bias:')\n self.biasSpace = QDoubleSpinBox(); self.biasSpace.setSpecialValueText(\"-\")\n self.biasSpace.setMinimum(0); self.biasSpace.setMaximum(1); self.biasSpace.setSingleStep(.1);\n self.biasSpace.setEnabled(False)\n self.featuresLabel = QLabel('Features:')\n self.feature_modeSpace = QComboBox();\n self.feature_modeSpace.addItems(['-','daisy','ilastik']);\n 
self.feature_modeSpace.setCurrentIndex(0)\n self.feature_modeSpace.setEnabled(False)\n\n self.trainButton = QPushButton(\"Train model\")\n self.trainButton.setEnabled(False)\n self.trainButton.setFocusPolicy(Qt.NoFocus)\n self.trainButton.clicked.connect(self.trainModel)\n\n ########## create buttons for model application group ##############\n self.predictionGroup = QGroupBox(\"Machine Learning model application\")\n\n selectFolder = QPushButton(\"Specify image folder\")\n selectFolder.setFocusPolicy(Qt.NoFocus)\n selectFolder.clicked.connect( self.selectImageFolder )\n self.imageFolderSpace = QLineEdit(); self.imageFolderSpace.setText(self.imageFolder)\n self.imageFolderSpace.setReadOnly(True)\n self.imageFolderSpace.setStyleSheet('color:gray;')\n\n self.predictButton = QPushButton(\"Generate masks\")\n self.predictButton.setFocusPolicy(Qt.NoFocus)\n self.predictButton.clicked.connect(self.predict)\n self.predictButton.setEnabled(False)\n\n self.recapButton = QPushButton(\"Save overview image of masks\")\n self.recapButton.setFocusPolicy(Qt.NoFocus)\n self.recapButton.clicked.connect(self.makeRecap)\n self.recapButton.setEnabled(False)\n\n self.inspectButton = QPushButton(\"Inspect masks\")\n self.inspectButton.setFocusPolicy(Qt.NoFocus)\n self.inspectButton.clicked.connect(self.openInspectionWindow)\n self.inspectButton.setEnabled(False)\n\n ######### create layout for model definition group ########\n layout = QGridLayout()\n\n # layout.addWidget(self.welcomeText, 0,0,1,2)\n layout.addWidget(selectModel, 1,0,1,2)\n layout.addWidget(QLabel('Model folder:'), 2,0,1,1)\n layout.addWidget(self.modelFolderSpace, 2,1,1,1)\n layout.addWidget(self.deepModel, 3,0,1,1)\n\n layout.addWidget(self.showMoreButton, 4,0,1,1)\n layout.addWidget(self.trainButton, 4,1,1,1)\n layout.addWidget(self.sigmasLabel, 5,0,1,1)\n layout.addWidget(self.sigmasSpace, 5,1,1,1)\n layout.addWidget(self.down_shapeLabel, 6,0,1,1)\n layout.addWidget(self.down_shapeSpace, 6,1,1,1)\n layout.addWidget(self.edge_sizeLabel, 7,0,1,1)\n layout.addWidget(self.edge_sizeSpace, 7,1,1,1)\n layout.addWidget(self.fractionLabel, 8,0,1,1)\n layout.addWidget(self.fractionSpace, 8,1,1,1)\n layout.addWidget(self.biasLabel, 9,0,1,1)\n layout.addWidget(self.biasSpace, 9,1,1,1)\n layout.addWidget(self.featuresLabel, 10,0,1,1)\n layout.addWidget(self.feature_modeSpace, 10,1,1,1)\n\n self.modelDefGroup.setLayout(layout)\n\n ######### create layout for model application group ########\n layout = QGridLayout()\n\n layout.addWidget(selectFolder, 13,0,1,2)\n layout.addWidget(QLabel('Image folder:'), 14,0,1,1)\n layout.addWidget(self.imageFolderSpace, 14,1,1,1)\n layout.addWidget(self.predictButton, 15,0,1,2)\n layout.addWidget(self.recapButton, 16,0,1,2)\n layout.addWidget(self.inspectButton, 17,0,1,2)\n\n self.predictionGroup.setLayout(layout)\n\n ##################################################################\n layout = QVBoxLayout()\n\n layout.addWidget(self.modelDefGroup)\n layout.addWidget(self.predictionGroup)\n \n self.sigmasLabel.hide()\n self.sigmasSpace.hide()\n self.down_shapeLabel.hide()\n self.down_shapeSpace.hide()\n self.edge_sizeLabel.hide()\n self.edge_sizeSpace.hide()\n self.fractionLabel.hide()\n self.fractionSpace.hide()\n self.biasLabel.hide()\n self.biasSpace.hide()\n self.featuresLabel.hide()\n self.feature_modeSpace.hide()\n self.showMoreModel = False\n\n self.modelGroup.setLayout(layout)\n\n def show_hide(self):\n if self.showMoreModel:\n self.sigmasLabel.hide()\n self.sigmasSpace.hide()\n self.down_shapeLabel.hide()\n 
self.down_shapeSpace.hide()\n self.edge_sizeLabel.hide()\n self.edge_sizeSpace.hide()\n self.fractionLabel.hide()\n self.fractionSpace.hide()\n self.biasLabel.hide()\n self.biasSpace.hide()\n self.featuresLabel.hide()\n self.feature_modeSpace.hide()\n self.showMoreModel = False\n else:\n self.sigmasLabel.show()\n self.sigmasSpace.show()\n self.down_shapeLabel.show()\n self.down_shapeSpace.show()\n self.edge_sizeLabel.show()\n self.edge_sizeSpace.show()\n self.fractionLabel.show()\n self.fractionSpace.show()\n self.biasLabel.show()\n self.biasSpace.show()\n self.featuresLabel.show()\n self.feature_modeSpace.show()\n self.showMoreModel = True\n\n def selectModelFolder(self):\n self.modelFolder = QFileDialog.getExistingDirectory(self, \"Select Input Folder of Model\")\n\n # check if a trainingset is present\n # a trainingset needs to exist for every model, even if the model is already trained.\n trainingset_folder = os.path.join(self.modelFolder,'trainingset')\n if os.path.exists(trainingset_folder):\n flist_in = ioDT.get_image_list(trainingset_folder, string_filter='_GT', mode_filter='exclude')\n flist_in.sort()\n flist_gt = ioDT.get_image_list(trainingset_folder, string_filter='_GT', mode_filter='include')\n flist_gt.sort()\n\n if len(flist_in) == 0:\n QMessageBox.warning(self,'Warning, no trainingset!','Selected \"'+self.modelFolder+'\" but no trainingset *data* detected. Transfer some images in the \"trainingset\" folder.')\n self.modelFolder = '-'\n return\n if len(flist_in)!=len(flist_gt):\n QMessageBox.warning(self,'Warning, trainingset incomplete!','Selected \"'+self.modelFolder+'\" but not all masks have been created.\\nPlease provide manually annotated masks.')\n for f in flist_in:\n fn,ext = os.path.splitext(f)\n mask_name = fn+'_GT'+ext\n if not os.path.exists(mask_name):\n m = manualmask.makeManualMask(f, subfolder='',fn=fn+'_GT'+ext)\n # m.setModal(True)\n m.show()\n m.exec()\n # self.modelFolder = '-'\n # return\n else:\n QMessageBox.warning(self,'Warning, no trainingset!','Selected \"'+self.modelFolder+'\" but no \"trainingset\" folder detected.')\n self.modelFolder = '-'\n return\n # check if the model is already trained.\n # if not, only allow training button\n model_file = os.path.join(self.modelFolder,'scaler.pkl')\n if not os.path.exists(model_file):\n QMessageBox.warning(self,'Warning, train model!','Train the model before loading!\\nSetting default parameters...')\n else:\n self.loadModel()\n if self.classifier is None:\n return\n self.predictButton.setEnabled(True)\n self.recapButton.setEnabled(True)\n self.inspectButton.setEnabled(True)\n \n self.modelFolderSpace.setText(self.modelFolder)\n self.set_params()\n self.sigmasSpace.setEnabled(True)\n self.down_shapeSpace.setEnabled(True)\n self.edge_sizeSpace.setEnabled(True)\n self.fractionSpace.setEnabled(True)\n self.biasSpace.setEnabled(True)\n self.feature_modeSpace.setEnabled(True)\n self.trainButton.setEnabled(True)\n\n def set_params(self):\n self.sigmasSpace.setText(str(self.params['sigmas']))\n self.down_shapeSpace.setValue(self.params['down_shape'])\n self.edge_sizeSpace.setValue(self.params['edge_size'])\n self.fractionSpace.setValue(self.params['fraction'])\n self.biasSpace.setValue(self.params['bias'])\n self.feature_modeSpace.setCurrentIndex(['-','daisy','ilastik'].index(self.params['feature_mode']))\n self.feature_modeSpace.model().item(0).setEnabled(False)\n\n def read_and_check_params(self):\n s_str = self.sigmasSpace.text().replace(' ','').replace('[','').replace(']','')\n if s_str[-1]==',': s_str = 
s_str[:-1]\n self.params['sigmas'] = []\n for x in s_str.split(','):\n try:\n self.params['sigmas'].append(float(x))\n except:\n self.params['sigmas'].append(x)\n self.params['down_shape'] = self.down_shapeSpace.value()\n self.params['edge_size'] = self.edge_sizeSpace.value()\n self.params['fraction'] = self.fractionSpace.value()\n self.params['bias'] = self.biasSpace.value()\n self.params['feature_mode'] = self.feature_modeSpace.currentText()\n if not all(isinstance(x, float) for x in self.params['sigmas']):\n QMessageBox.warning(self,'Warning, values of sigmas not valid!','It seems there is at least one sigma that is not a number:\\n'+str(self.params['sigmas']))\n \n def trainModel(self, archBox):\n self.read_and_check_params()\n\n #############################################\n # load images to be used as training set\n #############################################\n training_folder = os.path.join(self.modelFolder,'trainingset')\n flist_in = ioDT.get_image_list(training_folder, string_filter='_GT', mode_filter='exclude')\n img_train = []\n for f in flist_in:\n img = imread(f)\n if len(img.shape) == 2:\n img = np.expand_dims(img,0)\n if img.shape[-1] == np.min(img.shape):\n img = np.moveaxis(img, -1, 0)\n img_train.append( img[0] )\n # img_train = np.array(img_train)\n\n flist_gt = ioDT.get_image_list(training_folder, string_filter='_GT', mode_filter='include')\n gt_train = [ imread(f) for f in flist_gt ]\n gt_train = [ g.astype(int) for g in gt_train ]\n\n print('##### Training set:')\n for i,f in enumerate(zip(flist_in,flist_gt)):\n print(i+1,'\\t', os.path.split(f[0])[-1],'\\t', os.path.split(f[1])[-1])\n\n #############################################\n # compute features and generate training set and weights\n #############################################\n\n print('##### Generating training set...')\n X, Y, w, self.scaler = train.generate_training_set( img_train, \n [g.astype(np.uint8) for g in gt_train], \n sigmas=self.params['sigmas'],\n down_shape=self.params['down_shape'],\n edge_size=self.params['edge_size'],\n fraction=self.params['fraction'],\n feature_mode=self.params['feature_mode'],\n bias=self.params['bias'] )\n\n #############################################\n # Train the model\n #############################################\n\n print('##### Training model...')\n start = time.time()\n self.classifier = train.train_classifier( X, Y, w, deep=self.deepModel.isChecked(), hidden=(350, 50) )\n print('Models trained in %.3f seconds.'%(time.time()-start))\n # print('classes_: ', self.classifier.classes_)\n # print('coef_: ', self.classifier.coef_)\n\n #############################################\n # Save the model\n #############################################\n\n ioML.save_model( self.modelFolder,\n self.classifier,\n self.scaler,\n sigmas=self.params['sigmas'],\n down_shape=self.params['down_shape'],\n edge_size=self.params['edge_size'],\n fraction=self.params['fraction'],\n feature_mode=self.params['feature_mode'],\n bias=self.params['bias'], deep=self.deepModel.isChecked() )\n print('##### Model saved!')\n self.predictButton.setEnabled(True)\n\n def loadModel(self):\n #############################################\n # load parameters and classifier\n #############################################\n print('##### Loading classifier model and parameters...')\n self.classifier, self.scaler, self.params = ioML.load_model( self.modelFolder, deep=self.deepModel.isChecked() )\n if self.classifier is None:\n QMessageBox.warning(self,'Warning!','Could not find any model')\n else:\n 
print('Success! Model loaded!')\n\n    def selectImageFolder(self):\n        self.imageFolder = QFileDialog.getExistingDirectory(self, \"Select Input Folder of Images\",\n \"C:\\\\Users\\\\nicol\\\\Desktop\\\\dmso\")\n if self.imageFolder == '':\n self.imageFolder = '-'\n return\n\n self.imageFolderSpace.setText(self.imageFolder)\n self.recapButton.setEnabled(True)\n self.inspectButton.setEnabled(True)\n self.maskFolderSpace.setText(self.imageFolder)\n # self.inspectButtonTL.setEnabled(True)\n\n    def predict(self):\n #############################################\n # apply classifiers and save images\n #############################################\n\n result_folder = os.path.join(self.imageFolder,'result_segmentation')\n if not os.path.exists(result_folder):\n os.mkdir(result_folder)\n\n flist_in = ioDT.get_image_list(self.imageFolder)\n flist_in.sort()\n\n for f_in in flist_in:\n\n print('#'*20+'\\nLoading',f_in,'...')\n img = imread(f_in)\n if len(img.shape) == 2:\n img = np.expand_dims(img,0)\n if img.shape[-1] == np.min(img.shape):\n img = np.moveaxis(img, -1, 0)\n img = img[0]\n\n print('Predicting image...')\n pred, prob = predict.predict_image( img,\n self.classifier,\n self.scaler,\n sigmas=self.params['sigmas'],\n new_shape_scale=self.params['down_shape'],\n feature_mode=self.params['feature_mode'],\n deep=self.deepModel.isChecked() )\n\n # remove objects at the border\n negative = ndi.binary_fill_holes(pred==0)\n mask_pred = (pred==1)*negative\n edge_prob = ((2**16-1)*prob[2]).astype(np.uint16)\n mask_pred = mask_pred.astype(np.uint8)\n\n # save mask\n parent, filename = os.path.split(f_in)\n filename, file_extension = os.path.splitext(filename)\n new_name = os.path.join(parent,'result_segmentation',filename+'_classifier'+file_extension)\n imsave(new_name, pred, check_contrast=False)\n\n # perform watershed\n mask_final = predict.make_watershed( mask_pred,\n edge_prob,\n new_shape_scale=self.params['down_shape'] )\n\n # save final mask\n parent, filename = os.path.split(f_in)\n filename, file_extension = os.path.splitext(filename)\n new_name = os.path.join(parent,'result_segmentation',filename+'_watershed'+file_extension)\n imsave(new_name, mask_final, check_contrast=False)\n\n print('All images done!')\n\n    def makeRecap(self):\n name,_ = QFileDialog.getSaveFileName(self, 'Save Overview File')\n if name != '':\n overviewML.generate_overview(self.imageFolder, saveFig=True, fileName=name, downshape=5)\n\n    def openInspectionWindow(self):\n self.inspector = inspection.inspectionWindow_20max(self.imageFolder, parent=None, start=0, stop=20)\n self.inspector.show()\n\n    def selectMaskFolder(self):\n self.maskFolder = QFileDialog.getExistingDirectory(self, \"Select Input Folder of Masks\",\n \"C:\\\\Users\\\\nicol\\\\Desktop\\\\dmso\")\n if self.maskFolder == '':\n self.maskFolder = self.imageFolder\n return\n\n self.maskFolderSpace.setText(self.maskFolder)\n\n\n    '''\n    Import masks if user has already created them\n    '''\n\n    def createImportGroup(self):\n self.importGroup = QGroupBox(\"\")\n \n ########## create buttons for import masks and images group ##############\n self.importGroup1 = QGroupBox(\"If masks are already present, import files.\")\n\n\n # self.instruct2 = QLabel('If masks are already generated, \\nselect image and mask folder here.') \n \n selectFolder = QPushButton(\"Specify image folder\")\n selectFolder.setFocusPolicy(Qt.NoFocus)\n selectFolder.clicked.connect( self.selectImportImageFolder )\n self.imageImportFolderSpace = QLineEdit()\n 
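# read-only display of the chosen import folder; populated by selectImportImageFolder()\n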
self.imageImportFolderSpace.setText(self.imageImportFolder)\n self.imageImportFolderSpace.setReadOnly(True)\n self.imageImportFolderSpace.setStyleSheet('color:gray;')\n\n selectMaskFolder = QPushButton(\"Specify mask folder\")\n selectMaskFolder.setFocusPolicy(Qt.NoFocus)\n selectMaskFolder.clicked.connect( self.selectMaskFolder )\n self.maskFolderSpace = QLineEdit(); self.maskFolderSpace.setText(self.maskFolder)\n self.maskFolderSpace.setReadOnly(True)\n self.maskFolderSpace.setStyleSheet('color:gray;')\n \n self.maskLabel = QLabel('File identifier of masks:')\n self.maskSpace = QLineEdit(); self.maskSpace.setText(\"\")\n\n self.isBorder = QCheckBox(\"Include objects at border of images\")\n self.isBorder.setChecked(False)\n \n\n self.importGroup2 = QGroupBox(\"\")\n\n self.importButton = QPushButton(\"Import Masks and Images\")\n self.importButton.setFocusPolicy(Qt.NoFocus)\n self.importButton.clicked.connect(self.importImageMask)\n\n layout = QGridLayout()\n # layout.addWidget(self.instruct2, 0,0,1,2)\n layout.addWidget(selectFolder, 1,0,1,2)\n layout.addWidget(QLabel('Image folder:'), 2,0,1,1)\n layout.addWidget(self.imageImportFolderSpace,2,1,1,1)\n\n layout.addWidget(selectMaskFolder, 3,0,1,2)\n layout.addWidget(QLabel('Masks folder:'), 4,0,1,1)\n layout.addWidget(self.maskFolderSpace, 4,1,1,1)\n \n layout.addWidget(self.maskLabel, 5,0,1,1)\n layout.addWidget(self.maskSpace, 5,1,1,1)\n \n layout.addWidget(self.isBorder, 6,0,1,2)\n self.importGroup1.setLayout(layout)\n\n layout = QGridLayout()\n layout.addWidget(self.importButton, 0,0,1,2)\n self.importGroup2.setLayout(layout)\n\n layout = QVBoxLayout()\n layout.addWidget(self.importGroup1)\n layout.addWidget(self.importGroup2)\n self.importGroup.setLayout(layout)\n\n\n    def selectImportImageFolder(self):\n self.imageImportFolder = QFileDialog.getExistingDirectory(self, \"Select Input Folder of Images\",\n \"C:\\\\Users\\\\nicol\\\\Desktop\\\\dmso\")\n if self.imageImportFolder == '':\n self.imageImportFolder = '-'\n return\n\n self.imageImportFolderSpace.setText(self.imageImportFolder)\n self.maskFolderSpace.setText(self.imageImportFolder)\n\n    def importImageMask(self):\n objectsparser.parsing_images(self.imageImportFolder, \\\n self.maskFolder, self.maskSpace.text(), self.isBorder.isChecked())\n\n\n    '''\n    QUANTIFICATION TAB\n    '''\n    def createQuantificationTab(self):\n self.groups = []\n\n mainTab = QWidget()\n self.createGroup1()\n self.createGroup2()\n splitter = QSplitter(Qt.Vertical)\n splitter.addWidget(self.group1)\n splitter.addWidget(self.group2)\n \n mainTabLayout = QVBoxLayout() \n mainTabLayout.addWidget(splitter)\n mainTab.setLayout(mainTabLayout) \n return mainTab\n\n    def group_checked(self, state, group):\n chs = []\n for ch in group.findChildren(QLabel):\n chs.append(ch)\n for ch in group.findChildren(QSpinBox):\n chs.append(ch)\n for ch in group.findChildren(QComboBox):\n chs.append(ch)\n for ch in group.findChildren(QPushButton):\n chs.append(ch)\n for ch in group.findChildren(QCheckBox):\n chs.append(ch) \n\n if not state:\n for ch in chs:\n ch.setVisible(False)\n else:\n for ch in chs:\n ch.setVisible(True)\n\n    def createGroup1(self):\n self.group1 = QGroupBox(\"Groups\")\n self.group1.setCheckable(True)\n self.group1.toggled.connect(lambda state, x=self.group1: self.group_checked(state, x))\n\n self.tabs = QTabWidget()\n self.tabs.setTabsClosable(True)\n\n self.tabs.tabCloseRequested.connect(self.removeGroup)\n\n self.AddTabButton = QPushButton(\"Add New Group\")\n self.AddTabButton.clicked.connect(self.addGroup)\n 
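# seed the tab widget with one empty group so a dataset table exists before any user interaction\n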
self.addGroup()\n\n layout = QVBoxLayout()\n layout.addWidget(self.AddTabButton)\n layout.addWidget(self.tabs)\n self.group1.setLayout(layout)\n\n def addGroup(self):\n class FileDialog(QFileDialog):\n def __init__(self, *args):\n QFileDialog.__init__(self, *args)\n self.setOption(self.DontUseNativeDialog, True)\n self.setFileMode(self.DirectoryOnly)\n\n for view in self.findChildren((QListView, QTreeView)):\n if isinstance(view.model(), QFileSystemModel):\n view.setSelectionMode(QAbstractItemView.ExtendedSelection)\n\n class MyTable(QTableWidget):\n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Delete:\n row = self.currentRow()\n self.removeRow(row)\n else:\n super().keyPressEvent(event)\n\n def addDataset():\n dialog = FileDialog()\n if dialog.exec_() == QDialog.Accepted:\n datasets = dialog.selectedFiles()\n else:\n return\n # print(dialog.selectedFiles())\n\n # dataset = QFileDialog.getExistingDirectory(self, \"Select dataset\")\n for dataset in datasets:\n if dataset!='':\n table = self.tabs.widget(self.tabs.currentIndex()).children()[1]\n rowPosition = table.rowCount()\n table.insertRow(rowPosition)\n table.setItem(rowPosition,0,QTableWidgetItem(dataset))\n\n newTab = QWidget()\n\n table = MyTable()\n table.insertColumn(0)\n selectFolder = QPushButton(\"Select new dataset\")\n selectFolder.clicked.connect(addDataset)\n\n tablayout = QGridLayout()\n tablayout.addWidget(table,0,0,1,2)\n tablayout.addWidget(selectFolder,1,0,1,2)\n newTab.setLayout(tablayout)\n\n # n = self.tabs.tabText(self.tabs.count()-1)\n # \n self.tabs.addTab(newTab, 'Group '+str(self.tabs.count()+1))\n\n # print(self.tabs.widget(self.tabs.count()-1).children())\n\n # return tab\n \n def removeGroup(self,index):\n self.tabs.removeTab(index)\n\n def selectAllButtonClicked(self):\n if self.selectAllButton.isChecked():\n self.morphoType.setEnabled(False)\n # self.maskType.setEnabled(False)\n else:\n self.morphoType.setEnabled(True)\n # self.maskType.setEnabled(True)\n\n def createGroup2(self):\n self.group2 = QGroupBox(\"\")\n\n self.isTimelapse = QCheckBox(\"Timelapse data\")\n self.isTimelapse.setChecked(False)\n\n def buildGroupVis():\n group = QGroupBox(\"Visualization functions\")\n group.setCheckable(True)\n group.toggled.connect(lambda state, x=group: self.group_checked(state, x))\n group.setChecked(False)\n\n compositeButton = QPushButton(\"Create overview composite\")\n compositeButton.clicked.connect(self.createCompositeOverviewAll)\n\n meshgridButton = QPushButton(\"Create meshgrid overview\")\n meshgridButton.clicked.connect(self.createMeshgridOverviewAll)\n\n layout = QVBoxLayout()\n layout.addWidget(compositeButton)\n layout.addWidget(meshgridButton)\n group.setLayout(layout)\n self.group_checked(False, group)\n\n return group\n\n def buildGroupMorpho():\n group = QGroupBox(\"Morphology quantification\")\n group.setCheckable(True)\n group.toggled.connect(lambda state, x=group: self.group_checked(state, x))\n group.setChecked(False)\n\n self.maskType = QComboBox()\n self.maskType.addItem(\"Unprocessed\")\n self.maskType.addItem(\"Straightened\")\n\n self.morphoKeys = [\n 'area',\n 'eccentricity',\n 'major_axis_length',\n 'minor_axis_length',\n 'equivalent_diameter',\n 'perimeter',\n 'euler_number',\n 'extent',\n 'form_factor',\n # 'inertia_tensor',\n # 'inertia_tensor_eigvals',\n # 'moments',\n # 'moments_central',\n # 'moments_hu',\n # 'moments_normalized',\n 'orientation',\n 'locoefa_coeff'\n ]\n self.datamorphotype = [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n # 0,\n # 0,\n # 0,\n # 0,\n # 
0,\n # 0,\n 0,\n 1,\n ]\n self.morphoType = QComboBox()\n for key in self.morphoKeys:\n self.morphoType.addItem(key)\n\n self.selectAllButton = QCheckBox(\"Use all parameters\")\n self.selectAllButton.clicked.connect(self.selectAllButtonClicked)\n\n morphologyButton = QPushButton(\"Visualize Morphological Parameter(s)\")\n morphologyButton.clicked.connect(self.createMorphologyPlot)\n\n layout = QGridLayout()\n layout.addWidget(QLabel(\"Type of mask:\"), 1,0,1,1)\n layout.addWidget(self.maskType, 1,1,1,1)\n layout.addWidget(QLabel(\"Morphological parameter\"), 2,0,1,1)\n layout.addWidget(self.morphoType, 2,1,1,1)\n layout.addWidget(self.selectAllButton, 3,0,1,2)\n layout.addWidget(morphologyButton, 4,0,1,2)\n group.setLayout(layout)\n self.group_checked(False, group)\n\n return group\n\n def buildGroupFluo():\n group = QGroupBox(\"Fluorescence quantification\")\n group.setCheckable(True)\n group.toggled.connect(lambda state, x=group: self.group_checked(state, x))\n group.setChecked(False)\n\n self.fluorescenceChannel = QSpinBox()\n self.fluorescenceChannel.setRange(0,100)\n self.fluorescenceChannel.setAlignment(Qt.AlignRight)\n\n self.spatialType = QComboBox()\n self.spatialType.addItem('Average')\n self.spatialType.addItem('Antero-Posterior profile')\n self.spatialType.addItem('Left-Right profile')\n self.spatialType.addItem('Radial profile')\n self.spatialType.addItem('Angular profile')\n\n computeButton = QPushButton(\"Compute graph\")\n computeButton.clicked.connect(self.createFluoGraph)\n\n layout = QGridLayout()\n layout.addWidget(QLabel(\"Fluorescence channel:\"), 0,0,1,1)\n layout.addWidget(self.fluorescenceChannel, 0,1,1,1)\n layout.addWidget(QLabel(\"Spatial profile type:\"), 2,0,1,1)\n layout.addWidget(self.spatialType, 2,1,1,1)\n layout.addWidget(computeButton, 3,0,1,2)\n group.setLayout(layout)\n self.group_checked(False, group)\n\n return group\n\n # def buildGroupSpots():\n # group = QGroupBox(\"Spots quantification\")\n # group.setCheckable(True)\n # group.toggled.connect(lambda state, x=group: self.group_checked(state, x))\n # group.setChecked(False)\n\n # self.spotsFluorescenceChannel = QSpinBox()\n # self.spotsFluorescenceChannel.setRange(0,100)\n # self.spotsFluorescenceChannel.setAlignment(Qt.AlignRight)\n\n # self.spotsSpatialType = QComboBox()\n # self.spotsSpatialType.addItem('Average')\n # self.spotsSpatialType.addItem('Antero-Posterior profile')\n # self.spotsSpatialType.addItem('Left-Right profile')\n # self.spotsSpatialType.addItem('Radial profile')\n # self.spotsSpatialType.addItem('Angular profile')\n\n # self.spotsCountRadio = QPushButton(\"Spot count\")\n # self.spotsCountRadio.clicked.connect(self.makeSpotCountPlot)\n\n # # # self.spotsPositionRadio = QCheckBox(\"Position\")\n # # self.spotsAreaRadio = QCheckBox(\"Area\")\n # # self.spotaPerimeterRadio = QCheckBox(\"Perimeter\")\n # # self.spotsMajorAxisRadio = QCheckBox('Major axis')\n # # self.spotsMinorAxisRadio = QCheckBox('Minor Axis')\n # # self.spotsEccetricityRadio = QCheckBox('Eccentricity')\n # # self.spotsEftRadio = QCheckBox('Elliptical Fourier Transform')\n # # self.spotsOrientationRadio = QCheckBox('Orientation')\n # # self.spotsFluoRadio = QCheckBox('Fluorescence intensity')\n\n # # spotsButton = QPushButton(\"Compute graph\")\n # # spotsButton.clicked.connect(self.createSpotsGraphAll)\n\n # layout = QGridLayout()\n # layout.addWidget(QLabel('Fluorescence channel:'), 0,0,1,1)\n # layout.addWidget(self.spotsFluorescenceChannel, 0,1,1,1)\n # layout.addWidget(QLabel('Spatial profile type:'), 
1,0,1,1)\n # layout.addWidget(self.spotsSpatialType, 1,1,1,1)\n # layout.addWidget(self.spotsCountRadio, 2,0,1,2)\n # # layout.addWidget(self.spotsAreaRadio, 3,0,1,1)\n # # layout.addWidget(self.spotaPerimeterRadio, 3,1,1,1)\n # # layout.addWidget(self.spotsMajorAxisRadio, 4,0,1,1)\n # # layout.addWidget(self.spotsMinorAxisRadio, 4,1,1,1)\n # # layout.addWidget(self.spotsEftRadio, 5,0,1,1)\n # # layout.addWidget(self.spotsOrientationRadio,5,1,1,1)\n # # layout.addWidget(self.spotsFluoRadio, 6,0,1,1)\n # # layout.addWidget(spotsButton, 7,0,1,2)\n # group.setLayout(layout)\n # self.group_checked(False, group)\n\n # return group\n\n groupVis = buildGroupVis()\n groupMorpho = buildGroupMorpho()\n groupFluo = buildGroupFluo()\n # groupSpots = buildGroupSpots()\n \n layout = QGridLayout()\n layout.addWidget(self.isTimelapse, 0,0,1,1)\n layout.addWidget(groupVis, 2,0,1,2)\n layout.addWidget(groupMorpho, 3,0,1,2)\n layout.addWidget(groupFluo, 4,0,1,2)\n # layout.addWidget(groupSpots, 5,0,1,2)\n self.group2.setLayout(layout)\n\n def createCompositeOverviewAll(self):\n\n # for every group\n folders = []\n for i in range(self.tabs.count()):\n # extract table in the group\n children = self.tabs.widget(i).children()\n table = children[1]\n # extract folders (dataset) in the table\n for j in range(table.rowCount()):\n folder = table.item(j,0).text()\n folders.append(folder)\n overviewDT.createCompositeOverview(folder)\n # print(folders)\n\n file = '_composite_recap.tif/.png'\n text = 'Composite files saved at:'\n for f in folders:\n parent,cond = os.path.split(f)\n text = text + '\\n\\t'+os.path.join(os.path.split(parent)[-1],'result_segmentation', cond + file)\n QMessageBox.information(self,\"Completed successfully\",text)\n\n def createMeshgridOverviewAll(self):\n\n # for every group\n for i in range(self.tabs.count()):\n # extract table in the group\n children = self.tabs.widget(i).children()\n table = children[1]\n # extract folders (dataset) in the table\n folders = []\n for j in range(table.rowCount()):\n folder = table.item(j,0).text()\n folders.append(folder)\n overviewDT.createMeshgridOverview(folder)\n # print(folders)\n\n file = '_meshgrid_recap.png'\n text = 'Meshgrid files saved at:'\n for f in folders:\n parent,cond = os.path.split(f)\n text = text + '\\n\\t'+os.path.join(os.path.split(parent)[-1],'result_segmentation', cond + file)\n QMessageBox.information(self,\"Completed successfully\",text)\n\n def createMorphologyPlot(self):\n\n computeMorpho = [False for key in self.morphoKeys]\n computeMorpho[self.morphoType.currentIndex()] = True\n if self.selectAllButton.isChecked():\n computeMorpho = [True for key in self.morphoKeys]\n\n # extract all folders to compute\n folders = [[] for i in range(self.tabs.count())]\n for i in range(self.tabs.count()):\n children = self.tabs.widget(i).children()\n table = children[1]\n for j in range(table.rowCount()):\n folders[i].append( table.item(j,0).text() )\n\n # extract data from all the folders\n data_all, keys = arrangemorphodata.collect_morpho_data( \n folders, \n self.morphoKeys, \n computeMorpho, \n self.maskType.currentText(), \n self.isTimelapse.isChecked()\n )\n\n # for every quantification parameter, make the appropriate plot\n for key in keys:\n data_key = [data[key] for data in data_all]\n # print(data_key)\n\n # find out number of dimensions of the data_key object by going deeper in the object\n # and checking if the first item of layer n is iterable\n iterable = True\n ndim = 0\n first_object = data_key[0][0]\n while iterable:\n iterable 
= isinstance(first_object, Iterable)\n if iterable:\n ndim += 1\n first_object = first_object[0]\n \n # call the right visualization tool according to the number of dimensions\n ### clean up quantifier handler:\n self.quantifier = [self.quantifier[i] for i in range(len(self.quantifier)) if self.quantifier[i] is not None]\n\n if ndim == 0:\n self.quantifier.append( visualize0d.visualization_0d( data_key, key ) )\n self.quantifier[-1].show()\n elif ndim == 1:\n self.quantifier.append( visualize1d.visualization_1d( data_key, key ) )\n self.quantifier[-1].show()\n elif ndim == 2:\n self.quantifier.append( visualize2d.visualization_2d( data_key, key ) )\n self.quantifier[-1].show()\n \n def createFluoGraph(self):\n # print('createFluoGraph')\n # return\n\n # extract all folders to compute\n folders = [[] for i in range(self.tabs.count())]\n for i in range(self.tabs.count()):\n children = self.tabs.widget(i).children()\n table = children[1]\n for j in range(table.rowCount()):\n folders[i].append( table.item(j,0).text() )\n \n channel = self.fluorescenceChannel.value()\n distributionType = ['Average','APprofile','LRprofile','RADprofile','ANGprofile'][self.spatialType.currentIndex()]\n\n # extract data from all the folders\n data_all = arrangefluodata.collect_fluo_data( \n folders, \n channel, \n distributionType, \n self.isTimelapse.isChecked()\n )\n\n # if the result is None, something went wrong!\n if not data_all:\n QMessageBox.warning(self,'Warning, invalid channel!','The channel selected doesn\\'t appear in the raw data!')\n return\n\n # print(data_all)\n # make the appropriate plot\n data_key = [data['ch%d_%s'%(channel,distributionType)] for data in data_all]\n data_bckg = [data['ch%d_Background'%(channel)] for data in data_all]\n\n # find out number of dimensions of the data_key object by going deeper in the object\n # and checking if the first item of layer n is iterable\n iterable = True\n ndim = 0\n first_object = data_key[0][0]\n while iterable:\n iterable = isinstance(first_object, Iterable)\n if iterable:\n ndim += 1\n first_object = first_object[0]\n\n # call the right visualization tool according to the number of dimensions\n ### clean up quantifier handler:\n self.quantifier = [self.quantifier[i] for i in range(len(self.quantifier)) if self.quantifier[i] is not None]\n\n if ndim == 0:\n self.quantifier.append( visualize0d.visualization_0d( data_key, distributionType, background=data_bckg ) )\n self.quantifier[-1].show()\n elif ndim == 1:\n self.quantifier.append( visualize1d.visualization_1d( data_key, distributionType, background=data_bckg ) )\n self.quantifier[-1].show()\n elif ndim == 2:\n self.quantifier.append( visualize2d.visualization_2d( data_key, distributionType, background=data_bckg ) )\n self.quantifier[-1].show()\n\n def makeSpotCountPlot(self):\n # print('createFluoGraph')\n # return\n\n # extract all folders to compute\n folders = [[] for i in range(self.tabs.count())]\n for i in range(self.tabs.count()):\n children = self.tabs.widget(i).children()\n table = children[1]\n for j in range(table.rowCount()):\n folders[i].append( table.item(j,0).text() )\n\n # if self.spotsSpatialType.currentText()=='Average':\n # data_all = utils_quantify.collect_spots_data_from_folders(folders,spatialDistNeeded='count')\n # if not data_all:\n # return\n # utils_quantify.computeAndPlotMorphoAll(data_all,['count'],[True],\n # int(self.spotsFluorescenceChannel.value()),\n # self.isTimelapse.isChecked(),\n # style=self.plotType.currentText())\n\n # else:\n # ### plot the AP profile of the 
fluorescence in the mask\n # if self.spotsSpatialType.currentText()=='Antero-Posterior profile':\n # key1, key2 = 'APposition', 'APprofile'\n # ### plot the LR profile of the fluorescence in the mask\n # if self.spotsSpatialType.currentText()=='Left-Right profile':\n # key1, key2 = 'LRposition', 'LRprofile'\n # ### plot the radial profile of the fluorescence in the mask\n # if self.spotsSpatialType.currentText()=='Radial profile':\n # key1, key2 = 'RADposition', 'RADprofile'\n # ### plot the radial profile of the fluorescence in the mask\n # if self.spotsSpatialType.currentText()=='Angular profile':\n # key1, key2 = 'ANGposition', 'ANGprofile'\n\n # data_all = utils_quantify.collect_spots_data_from_folders(folders,spatialDistNeeded=key1)\n # if not data_all:\n # return\n # data_all = multi_objects_functions.convert_to_distribution(data_all,'count')\n # utils_quantify.computeProfileAll( data_all,\n # channel = int(self.spotsFluorescenceChannel.value()),\n # isTimelapse = self.isTimelapse.isChecked(),\n # profileType = key2,\n # ylabel='Cell count' )\n\n def createSpotsGraphAll(self):\n print('createSpotsGraphAll')\n return\n \n # params = ['area','perimeter',\n # 'major_axis_length','minor_axis_length','eccentricity',\n # 'elliptical_fourier_transform','orientation','mean_intensity']\n\n # toplot = [False for i in params]\n # if self.spotsAreaRadio.isChecked(): toplot[0]=True\n # if self.spotaPerimeterRadio.isChecked(): toplot[1]=True\n # if self.spotsMajorAxisRadio.isChecked(): toplot[2]=True\n # if self.spotsMinorAxisRadio.isChecked(): toplot[3]=True\n # if self.spotsEccetricityRadio.isChecked(): toplot[4]=True\n # if self.spotsEftRadio.isChecked(): toplot[5]=True\n # if self.spotsOrientationRadio.isChecked(): toplot[6]=True\n # if self.spotsFluoRadio.isChecked(): toplot[7]=True\n\n # # extract all folders to compute\n # folders = [[] for i in range(self.tabs.count())]\n # for i in range(self.tabs.count()):\n\n # children = self.tabs.widget(i).children()\n # table = children[1]\n # for j in range(table.rowCount()):\n # folders[i].append( table.item(j,0).text() )\n\n # if self.spotsSpatialType.currentText()=='Average':\n # print('To be implemented!')\n # # data_all = utils_quantify.collect_spots_data_from_folders(folders,spatialDistNeeded='Average')\n # # success = utils_quantify.computeAndPlotMorphoAll(data_all,params,t,int(self.fluorescenceChannel.value()),self.isTimelapse.isChecked())\n\n # if self.spotsSpatialType.currentText()=='Antero-Posterior profile':\n # print(\"To be implemented!\")\n # # self.createAPprofileAll_spots(folders)\n # # data_all = utils_quantify.collect_fluo_data_from_folders(folders,spatialDistNeeded='APprofile')\n # # success = utils_quantify.computeProfileAll(data_all,int(self.fluorescenceChannel.value()),self.isTimelapse.isChecked())\n\n # if self.spotsSpatialType.currentText()=='Radial profile':\n # print(\"To be implemented!\")\n # # data_all = utils_quantify.collect_fluo_data_from_folders(folders,spatialDistNeeded='RadialProfile')\n # # success = utils_quantify.computeProfileAll(data_all,int(self.fluorescenceChannel.value()),self.isTimelapse.isChecked())\n\n'''\nrun the main gui from the current file\n'''\nif __name__ == '__main__':\n def run():\n app = QApplication(sys.argv)\n gallery = morganaApp()\n gallery.show()\n sys.exit(app.exec_())\n\n run()\n", "sub_path": "morgana/GUIs/mainwindow.py", "file_name": "mainwindow.py", "file_ext": "py", "file_size_in_byte": 50636, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 38, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTabWidget", "line_number": 55, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.setStyle", "line_number": 78, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 78, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 86, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QCheckBox", "line_number": 90, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 95, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 115, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 119, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 121, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 122, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 122, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 124, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QCheckBox", "line_number": 127, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 130, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 131, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 131, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 134, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 135, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 137, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDoubleSpinBox", "line_number": 138, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 141, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QSpinBox", "line_number": 142, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 145, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDoubleSpinBox", "line_number": 146, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 149, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDoubleSpinBox", "line_number": 150, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 153, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 154, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 159, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 161, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 161, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 165, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 167, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 168, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 168, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 170, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 174, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 175, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 175, 
"usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 179, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 180, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 180, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 184, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 185, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 185, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 190, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 194, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 216, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 219, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 228, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 280, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 280, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 284, "usage_type": "call"}, {"api_name": "os.path", "line_number": 284, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 285, "usage_type": "call"}, {"api_name": "os.path", "line_number": 285, "usage_type": "attribute"}, {"api_name": "morgana.DatasetTools.io.get_image_list", "line_number": 286, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.io", "line_number": 286, "usage_type": "name"}, {"api_name": "morgana.DatasetTools.io.get_image_list", "line_number": 288, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.io", "line_number": 288, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 292, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 292, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 296, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 296, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 298, "usage_type": "call"}, {"api_name": "os.path", "line_number": 298, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 300, "usage_type": "call"}, {"api_name": "os.path", "line_number": 300, "usage_type": "attribute"}, {"api_name": "morgana.GUIs.manualmask.makeManualMask", "line_number": 301, "usage_type": "call"}, {"api_name": "morgana.GUIs.manualmask", "line_number": 301, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 308, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 308, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 313, "usage_type": "call"}, {"api_name": "os.path", "line_number": 313, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 314, "usage_type": "call"}, {"api_name": "os.path", "line_number": 314, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 315, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 315, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 358, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 358, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 366, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 366, "usage_type": "attribute"}, {"api_name": "morgana.DatasetTools.io.get_image_list", "line_number": 367, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.io", "line_number": 367, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.moveaxis", "line_number": 374, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.io.get_image_list", "line_number": 378, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.io", "line_number": 378, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 379, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 384, "usage_type": "call"}, {"api_name": "os.path", "line_number": 384, "usage_type": "attribute"}, {"api_name": "morgana.MLModel.train.generate_training_set", "line_number": 391, "usage_type": "call"}, {"api_name": "morgana.MLModel.train", "line_number": 391, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 392, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 405, "usage_type": "call"}, {"api_name": "morgana.MLModel.train.train_classifier", "line_number": 406, "usage_type": "call"}, {"api_name": "morgana.MLModel.train", "line_number": 406, "usage_type": "name"}, {"api_name": "time.time", "line_number": 407, "usage_type": "call"}, {"api_name": "morgana.MLModel.io.save_model", "line_number": 415, "usage_type": "call"}, {"api_name": "morgana.MLModel.io", "line_number": 415, "usage_type": "name"}, {"api_name": "morgana.MLModel.io.load_model", "line_number": 432, "usage_type": "call"}, {"api_name": "morgana.MLModel.io", "line_number": 432, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 434, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 434, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 439, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 439, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 456, "usage_type": "call"}, {"api_name": "os.path", "line_number": 456, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 457, "usage_type": "call"}, {"api_name": "os.path", "line_number": 457, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 458, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.io.get_image_list", "line_number": 460, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.io", "line_number": 460, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 466, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 468, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 469, "usage_type": "call"}, {"api_name": "numpy.moveaxis", "line_number": 470, "usage_type": "call"}, {"api_name": "morgana.MLModel.predict.predict_image", "line_number": 474, "usage_type": "call"}, {"api_name": "morgana.MLModel.predict", "line_number": 474, "usage_type": "name"}, {"api_name": "scipy.ndimage.binary_fill_holes", "line_number": 483, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 483, "usage_type": "name"}, {"api_name": "numpy.uint16", "line_number": 485, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 486, "usage_type": 
"attribute"}, {"api_name": "os.path.split", "line_number": 489, "usage_type": "call"}, {"api_name": "os.path", "line_number": 489, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 490, "usage_type": "call"}, {"api_name": "os.path", "line_number": 490, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 491, "usage_type": "call"}, {"api_name": "os.path", "line_number": 491, "usage_type": "attribute"}, {"api_name": "skimage.io.imsave", "line_number": 492, "usage_type": "call"}, {"api_name": "morgana.MLModel.predict.make_watershed", "line_number": 495, "usage_type": "call"}, {"api_name": "morgana.MLModel.predict", "line_number": 495, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 500, "usage_type": "call"}, {"api_name": "os.path", "line_number": 500, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 501, "usage_type": "call"}, {"api_name": "os.path", "line_number": 501, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 502, "usage_type": "call"}, {"api_name": "os.path", "line_number": 502, "usage_type": "attribute"}, {"api_name": "skimage.io.imsave", "line_number": 503, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getSaveFileName", "line_number": 508, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 508, "usage_type": "name"}, {"api_name": "morgana.MLModel.overview.generate_overview", "line_number": 510, "usage_type": "call"}, {"api_name": "morgana.MLModel.overview", "line_number": 510, "usage_type": "name"}, {"api_name": "morgana.GUIs.inspection.inspectionWindow_20max", "line_number": 513, "usage_type": "call"}, {"api_name": "morgana.GUIs.inspection", "line_number": 513, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 517, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 517, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 531, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 534, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 539, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 540, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 540, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 542, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 547, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 548, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 548, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 550, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 554, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 555, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QCheckBox", "line_number": 557, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 561, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 563, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.NoFocus", "line_number": 564, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 564, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 567, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 
570, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 574, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 583, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 587, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 594, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 594, "usage_type": "name"}, {"api_name": "morgana.ImageTools.objectsparsing.objectsparser.parsing_images", "line_number": 604, "usage_type": "call"}, {"api_name": "morgana.ImageTools.objectsparsing.objectsparser", "line_number": 604, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 614, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QSplitter", "line_number": 617, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.Vertical", "line_number": 617, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 617, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 621, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 628, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QSpinBox", "line_number": 630, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 632, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 634, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QCheckBox", "line_number": 636, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 647, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTabWidget", "line_number": 651, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 656, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 660, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 666, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.__init__", "line_number": 668, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 668, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QListView", "line_number": 672, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTreeView", "line_number": 672, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileSystemModel", "line_number": 673, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.ExtendedSelection", "line_number": 674, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 674, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 676, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.Key_Delete", "line_number": 678, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 678, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialog.Accepted", "line_number": 686, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 686, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 698, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 700, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 704, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 707, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 732, "usage_type": "call"}, {"api_name": 
"PyQt5.QtWidgets.QCheckBox", "line_number": 734, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 738, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 743, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 746, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 749, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 758, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 763, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 804, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QCheckBox", "line_number": 808, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 811, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 814, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 815, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 817, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 827, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QSpinBox", "line_number": 832, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignRight", "line_number": 834, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 834, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 836, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 843, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 846, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 847, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 849, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 914, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.morphology.overview.createCompositeOverview", "line_number": 934, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.morphology.overview", "line_number": 934, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 940, "usage_type": "call"}, {"api_name": "os.path", "line_number": 940, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 941, "usage_type": "call"}, {"api_name": "os.path", "line_number": 941, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 941, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.information", "line_number": 942, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 942, "usage_type": "name"}, {"api_name": "morgana.DatasetTools.morphology.overview.createMeshgridOverview", "line_number": 956, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.morphology.overview", "line_number": 956, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 962, "usage_type": "call"}, {"api_name": "os.path", "line_number": 962, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 963, "usage_type": "call"}, {"api_name": "os.path", "line_number": 963, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 963, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.information", "line_number": 964, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 964, "usage_type": "name"}, {"api_name": 
"morgana.DatasetTools.arrangemorphodata.collect_morpho_data", "line_number": 982, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.arrangemorphodata", "line_number": 982, "usage_type": "name"}, {"api_name": "collections.abc.Iterable", "line_number": 1001, "usage_type": "argument"}, {"api_name": "morgana.GUIs.visualize0d.visualization_0d", "line_number": 1011, "usage_type": "call"}, {"api_name": "morgana.GUIs.visualize0d", "line_number": 1011, "usage_type": "name"}, {"api_name": "morgana.GUIs.visualize1d.visualization_1d", "line_number": 1014, "usage_type": "call"}, {"api_name": "morgana.GUIs.visualize1d", "line_number": 1014, "usage_type": "name"}, {"api_name": "morgana.GUIs.visualize2d.visualization_2d", "line_number": 1017, "usage_type": "call"}, {"api_name": "morgana.GUIs.visualize2d", "line_number": 1017, "usage_type": "name"}, {"api_name": "morgana.DatasetTools.arrangefluodata.collect_fluo_data", "line_number": 1036, "usage_type": "call"}, {"api_name": "morgana.DatasetTools.arrangefluodata", "line_number": 1036, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 1045, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 1045, "usage_type": "name"}, {"api_name": "collections.abc.Iterable", "line_number": 1059, "usage_type": "argument"}, {"api_name": "morgana.GUIs.visualize0d.visualization_0d", "line_number": 1069, "usage_type": "call"}, {"api_name": "morgana.GUIs.visualize0d", "line_number": 1069, "usage_type": "name"}, {"api_name": "morgana.GUIs.visualize1d.visualization_1d", "line_number": 1072, "usage_type": "call"}, {"api_name": "morgana.GUIs.visualize1d", "line_number": 1072, "usage_type": "name"}, {"api_name": "morgana.GUIs.visualize2d.visualization_2d", "line_number": 1075, "usage_type": "call"}, {"api_name": "morgana.GUIs.visualize2d", "line_number": 1075, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1171, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 1171, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 1174, "usage_type": "call"}]} +{"seq_id": "167063079", "text": "from django.conf.urls import *\nfrom quests.models import *\nfrom django.contrib import admin\nfrom django.conf import settings\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Uncomment the admin/doc line below to enable admin documentation:\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^quests/', include('quests.urls')),\n url(r'^admin/', include(admin.site.urls)),\n)\n\n#To handle static html problems\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),\n )\n", "sub_path": "gamify/gamify/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 604, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 6, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.contrib.admin.site", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 18, 
"usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "84957099", "text": "######################################################################################################\n#\n# Organization: Asociacion De Investigacion En Inteligencia Artificial Para La Leucemia Peter Moss\n# Project: UP2 NCS1 Realsense F200 Facial Recognition Security System\n#\n# Author: Adam Milton-Barker (AdamMiltonBarker.com)\n#\n# Title: CamRead Class\n# Description: Reads frames from an F200 camera and streams them to a socket stream.\n# License: MIT License\n# Last Modified: 2020-09-29\n#\n######################################################################################################\n\nimport base64\nimport cv2\nimport dlib\nimport os\nimport sys\nimport time\n\nimport numpy as np\n\nfrom datetime import datetime\nfrom imutils import face_utils\nfrom threading import Thread\n\nfrom Classes.Helpers import Helpers\nfrom Classes.iotJumpWay import Device as iot\nfrom Classes.TassAI import TassAI\n\nimport pyrealsense as pyrs\nfrom pyrealsense.constants import rs_option\n\nclass CamRead(Thread):\n\t\"\"\" CamRead Class\n\n\tReads frames from a Realsense F200 camera and streams them\n\tto a socket stream.\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\" Initializes the class. \"\"\"\n\n\t\tself.Helpers = Helpers(\"CamRead\")\n\t\tsuper(CamRead, self).__init__()\n\n\t\tself.Helpers.logger.info(\"CamRead class initialized.\")\n\n\tdef run(self):\n\t\t\"\"\" Runs the module. \"\"\"\n\n\t\tself.identified = 0\n\n\t\t# Starts the TassAI module\n\t\tself.TassAI = TassAI()\n\t\tself.TassAI.cv()\n\t\tself.TassAI.ncs()\n\n\t\t# Starts the socket server\n\t\tsoc = self.Sockets.connect(self.Helpers.confs[\"Socket\"][\"host\"], self.Helpers.confs[\"Socket\"][\"port\"])\n\n\t\tfps = \"\"\n\t\tframecount = 0\n\t\tcount = 0\n\t\ttime1 = 0\n\t\ttime2 = 0\n\n\t\tself.publishes = [None] * (len(self.TassAI.NCS1.encoded) + 1)\n\n\t\twith pyrs.Service() as serv:\n\t\t\twith serv.Device() as dev:\n\n\t\t\t\tdev.apply_ivcam_preset(0)\n\n\t\t\t\twhile True:\n\t\t\t\t\tt1 = time.perf_counter()\n\n\t\t\t\t\tdev.wait_for_frames()\n\t\t\t\t\tframe = dev.color\n\t\t\t\t\tframe = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n\t\t\t\t\t# Processes the frame\n\t\t\t\t\traw, frame = self.TassAI.NCS1.prepareImg(frame)\n\t\t\t\t\twidth = frame.shape[1]\n\n\t\t\t\t\t# Gets faces and coordinates\n\t\t\t\t\tfaces, coords = self.TassAI.NCS1.faces(frame)\n\n\t\t\t\t\t# Writes header to frame\n\t\t\t\t\tcv2.putText(frame, \"TassAI\", (10, 30), self.TassAI.font,\n\t\t\t\t\t\t\t\t\t\t0.7, self.TassAI.color, 2, cv2.LINE_AA)\n\n\t\t\t\t\t# Writes date to frame\n\t\t\t\t\tcv2.putText(frame, str(datetime.now()), (10, 50),\n\t\t\t\t\t\t\t\t\t\t\tself.TassAI.font, 0.5, self.TassAI.color, 2, cv2.LINE_AA)\n\n\t\t\t\t\tif len(coords):\n\t\t\t\t\t\ti = 0\n\t\t\t\t\t\tmesg = \"\"\n\t\t\t\t\t\t# Loops through coordinates\n\t\t\t\t\t\tfor (i, face) in enumerate(coords):\n\n\t\t\t\t\t\t\t# Gets facial landmarks coordinates\n\t\t\t\t\t\t\tcoordsi = face_utils.shape_to_np(face)\n\t\t\t\t\t\t\t# Looks for matches/intruders\n\t\t\t\t\t\t\tknown, distance = self.TassAI.NCS1.match(raw, faces[i])\n\n\t\t\t\t\t\t\tif known:\n\t\t\t\t\t\t\t\tmesg = \"TassAI identified User #\" + str(known)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmesg = \"TassAI identified intruder\"\n\n\t\t\t\t\t\t\t# If iotJumpWay publish for user is in past\n\t\t\t\t\t\t\tif (self.publishes[int(known)] is None or (self.publishes[int(known)] + (1 
* 20)) < time.time()):\n\t\t\t\t\t\t\t\t# Update publish time for user\n\t\t\t\t\t\t\t\tself.publishes[int(known)] = time.time()\n\n\t\t\t\t\t\t\t\t# Send iotJumpWay notification\n\t\t\t\t\t\t\t\tself.iot.channelPub(\"Sensors\", {\n\t\t\t\t\t\t\t\t\t\"Type\": \"TassAI\",\n\t\t\t\t\t\t\t\t\t\"Sensor\": \"F200 Camera\",\n\t\t\t\t\t\t\t\t\t\"Value\": known,\n\t\t\t\t\t\t\t\t\t\"Message\": mesg\n\t\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\t\t# Send iotJumpWay notification\n\t\t\t\t\t\t\t\tself.iot.channelPub(\"Cameras\", {\n\t\t\t\t\t\t\t\t\t\"Type\": \"TassAI\",\n\t\t\t\t\t\t\t\t\t\"Sensor\": \"F200 Camera\",\n\t\t\t\t\t\t\t\t\t\"Value\": known,\n\t\t\t\t\t\t\t\t\t\"Message\": mesg\n\t\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\t(x, y, w, h) = self.TassAI.NCS1.bounding_box(faces[i])\n\t\t\t\t\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)\n\n\t\t\t\t\t\t\tcx = int(round(+(w/2)))\n\t\t\t\t\t\t\tcy = int(round(y+(h/2)))\n\n\t\t\t\t\t\t\tcv2.putText(frame, \"User ID#\"+str(known), (x, y - 5),\n\t\t\t\t\t\t\t\t\t\tcv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)\n\n\t\t\t\t\t\t\tdistance = dev.depth[cy][cx]/1000.0\n\t\t\t\t\t\t\tif(distance != 0.0):\n\t\t\t\t\t\t\t\tcv2.putText(frame, str(distance) + \"cm\", (x + (w - 20), y - 5),\n\t\t\t\t\t\t\t\t\t\tcv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)\n\n\n\t\t\t\t\t\t\t# Draws facial landmarks\n\t\t\t\t\t\t\tfor (x, y) in coordsi:\n\t\t\t\t\t\t\t\tcv2.circle(frame, (x, y), 2, (0, 255, 0), -1)\n\t\t\t\t\t\t\t# Adds user name to frame\n\t\t\t\t\t\t\ti += 1\n\n\t\t\t\t\tcv2.putText(frame, fps, (width-170, 30), cv2.FONT_HERSHEY_SIMPLEX,\n\t\t\t\t\t\t\t\t0.5, self.TassAI.color, 1, cv2.LINE_AA)\n\n\t\t\t\t\td = dev.depth * dev.depth_scale * 1000\n\t\t\t\t\td = cv2.applyColorMap(d.astype(np.uint8), cv2.COLORMAP_RAINBOW)\n\n\t\t\t\t\tcd = np.concatenate((frame, d), axis=1)\n\n\t\t\t\t\t# Streams the modified frame to the socket server\n\t\t\t\t\tencoded, buffer = cv2.imencode('.jpg', cd)\n\t\t\t\t\tsoc.send(base64.b64encode(buffer))\n\n\t\t\t\t\t# FPS calculation\n\t\t\t\t\tframecount += 1\n\t\t\t\t\tif framecount >= 15:\n\t\t\t\t\t\tfps = \"Stream: {:.1f} FPS\".format(time1/15)\n\t\t\t\t\t\tframecount = 0\n\t\t\t\t\t\ttime1 = 0\n\t\t\t\t\t\ttime2 = 0\n\t\t\t\t\tt2 = time.perf_counter()\n\t\t\t\t\telapsedTime = t2-t1\n\t\t\t\t\ttime1 += 1/elapsedTime\n\t\t\t\t\ttime2 += elapsedTime\n\t\t\t\t\ttime.sleep(0.05)\n", "sub_path": "UP2/NCS1/Realsense/F200/Classes/CamRead.py", "file_name": "CamRead.py", "file_ext": "py", "file_size_in_byte": 4967, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "threading.Thread", "line_number": 35, "usage_type": "name"}, {"api_name": "Classes.Helpers.Helpers", "line_number": 45, "usage_type": "call"}, {"api_name": "Classes.TassAI.TassAI", "line_number": 56, "usage_type": "call"}, {"api_name": "pyrealsense.Service", "line_number": 71, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 81, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 92, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 95, "usage_type": "name"}, {"api_name": 
"cv2.LINE_AA", "line_number": 96, "usage_type": "attribute"}, {"api_name": "imutils.face_utils.shape_to_np", "line_number": 105, "usage_type": "call"}, {"api_name": "imutils.face_utils", "line_number": 105, "usage_type": "name"}, {"api_name": "time.time", "line_number": 115, "usage_type": "call"}, {"api_name": "time.time", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 136, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 141, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_PLAIN", "line_number": 142, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 146, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_PLAIN", "line_number": 147, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 152, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 156, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 157, "usage_type": "attribute"}, {"api_name": "cv2.applyColorMap", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 160, "usage_type": "attribute"}, {"api_name": "cv2.COLORMAP_RAINBOW", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 162, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 165, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 166, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 175, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 179, "usage_type": "call"}]} +{"seq_id": "115024248", "text": "import cv2\r\nimport os\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\n_BATCH_SIZE = 6\r\n\r\n\r\n# this function try to read data folder and construct the list file containt path to image\r\n# output list file have structure LIST = [[current_frame_path, reference_frame_path, groundtruth_frame_path],\r\n# [current_frame_path, reference_frame_path, groundtruth_frame_path],\r\n# ....\r\n# ]\r\nwith tf.device('/cpu:0'):\r\n def read_new_data(path,train = True,train_test_ration = 0.9):\r\n listFilesPath = []\r\n list_subFolder = os.listdir(path)\r\n for name_subfolder in list_subFolder:\r\n subfolder_path = path + '/' + name_subfolder\r\n gt_folder_path = subfolder_path + '/' + 'groundtruth'\r\n input_folder_path = subfolder_path + '/' + 'input'\r\n BG_folder_path = subfolder_path + '/bg'\r\n\r\n gt_files = [f for f in os.listdir(gt_folder_path) if os.path.isfile(os.path.join(gt_folder_path, f))]\r\n input_file = [f for f in os.listdir(input_folder_path) if os.path.isfile(os.path.join(input_folder_path, f))]\r\n BG_file = [f for f in os.listdir(BG_folder_path) if os.path.isfile(os.path.join(BG_folder_path, f))]\r\n if train:\r\n for i in range(0,round(len(input_file)*train_test_ration)):\r\n input_image_path = input_folder_path + '/'+input_file[i]\r\n gt_image_path = gt_folder_path + '/'+ gt_files[i]\r\n bg_image_path = BG_folder_path + '/'+ BG_file[i]\r\n temp_path=[input_image_path, bg_image_path, gt_image_path]\r\n listFilesPath.append(temp_path)\r\n else:\r\n for i in range(round(len(input_file)*train_test_ration),len(input_file)):\r\n input_image_path = input_folder_path + '/'+input_file[i]\r\n gt_image_path = gt_folder_path + '/'+ gt_files[i]\r\n bg_image_path = BG_folder_path + '/'+ BG_file[i]\r\n temp_path=[input_image_path, bg_image_path, gt_image_path]\r\n listFilesPath.append(temp_path)\r\n return 
listFilesPath\r\n\r\n def read_old_data(path,train = False,train_test_ration = 0.9):\r\n listFilesPath = []\r\n list_subFolder = os.listdir(path)\r\n\r\n for name_subfolder in list_subFolder:\r\n subfolder_path = path + '/' + name_subfolder\r\n gt_folder_path = subfolder_path + '/' + 'groundtruth'\r\n input_folder_path = subfolder_path + '/' + 'input'\r\n BG_folder_path = subfolder_path + '/bg'\r\n\r\n #read infor_txt file\r\n temporalROI_path = subfolder_path + '/temporalROI.txt'\r\n content = open(temporalROI_path, 'r')\r\n content = content.readline( )\r\n data_content = [int(x) for x in content.split( )]\r\n\r\n start_frame = data_content[0]\r\n if len(data_content) == 3:\r\n stop_frame = data_content[2]\r\n else:\r\n stop_frame = data_content[1]\r\n\r\n number_frames = stop_frame - start_frame\r\n train_frame_end = start_frame+round(number_frames * train_test_ration)\r\n\r\n # gt_files = [f for f in os.listdir(gt_folder_path) if os.path.isfile(os.path.join(gt_folder_path, f))]\r\n # input_file = [f for f in os.listdir(input_folder_path) if os.path.isfile(os.path.join(input_folder_path, f))]\r\n # BG_file = [f for f in os.listdir(BG_folder_path) if os.path.isfile(os.path.join(BG_folder_path, f))]\r\n if train:\r\n for i in range(start_frame, train_frame_end):\r\n input_image_path = input_folder_path + '/in{0:06d}.jpg'.format(i + 1)\r\n gt_image_path = gt_folder_path + '/gt{0:06d}.png'.format(i + 1)\r\n bg_image_path = BG_folder_path + '/bg{0:06d}.jpg'.format(i + 1)\r\n temp_path = [input_image_path, bg_image_path, gt_image_path]\r\n listFilesPath.append(temp_path)\r\n else:\r\n for i in range(train_frame_end + 1, stop_frame):\r\n input_image_path = input_folder_path + '/in{0:06d}.jpg'.format(i + 1)\r\n gt_image_path = gt_folder_path + '/gt{0:06d}.png'.format(i + 1)\r\n bg_image_path = BG_folder_path + '/bg{0:06d}.jpg'.format(i + 1)\r\n temp_path = [input_image_path, bg_image_path, gt_image_path]\r\n listFilesPath.append(temp_path)\r\n\r\n\r\n return listFilesPath\r\n\r\n def read_data(list_all, s):\r\n # list_all = read_new_data(path,train)\r\n current_images = []\r\n reference_images = []\r\n groundtruth_images = []\r\n\r\n list = list_all[s*_BATCH_SIZE:min((s+1)*_BATCH_SIZE,len(list_all))]\r\n\r\n for _temp_ in list:\r\n\r\n _current_image_ = cv2.imread(_temp_[0]).astype('float') / 255\r\n _reference_image_ = cv2.imread(_temp_[1]).astype('float') / 255\r\n _groundtruth_image_ = cv2.imread(_temp_[2])\r\n\r\n _groundtruth_image_ = _groundtruth_image_[:,:,0]\r\n _groundtruth_image_[_groundtruth_image_ < 255] = 0\r\n _groundtruth_image_[_groundtruth_image_ == 255] = 1\r\n\r\n _current_image_ = cv2.resize(_current_image_, dsize=(320, 240))\r\n _reference_image_ = cv2.resize(_reference_image_, dsize=(320, 240))\r\n _groundtruth_image_ = cv2.resize(_groundtruth_image_, dsize=(320, 240))\r\n\r\n\r\n\r\n current_images.append(_current_image_)\r\n reference_images.append(_reference_image_)\r\n groundtruth_images.append(_groundtruth_image_)\r\n\r\n return [current_images,reference_images,groundtruth_images]\r\n", "sub_path": "load_data.py", "file_name": "load_data.py", "file_ext": "py", "file_size_in_byte": 5813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "tensorflow.device", "line_number": 14, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 24, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 99, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 108, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "37662847", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 5 21:10:43 2018\n\n@author: dell\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure,output_notebook,show\n\n#所有特征之间的pearson相关系数,用热点图表示\ndef draw_all_features():\n data = pd.read_csv('result.csv')\n pearson = data.corr()\n sns.heatmap(pearson)\n plt.show()\n return\n\n#不同类型的方位与duration之间的关系\ndef show_bearing():\n data = pd.read_csv('result.csv')\n sns.regplot(x = 'bearing', y = 'trip_duration', data = data)\n plt.show()\n \n sns.regplot(x = 'bearing_pick_cent_p', y = 'trip_duration', data = data)\n plt.show()\n sns.regplot(x = 'bearing_drop_cent_p', y = 'trip_duration', data = data)\n plt.show()\n sns.regplot(x = 'bearing_cent_p_cent_d', y = 'trip_duration', data = data)\n plt.show()\n return\n\n#label_pick,label_drop与duration的关系\ndef show_label():\n data = pd.read_csv('result.csv')\n label_pick = pd.DataFrame(data.groupby('label_pick')['trip_duration'].mean())\n label_pick.reset_index(inplace = True)\n label_drop = pd.DataFrame(data.groupby('label_drop')['trip_duration'].mean())\n label_drop.reset_index(inplace = True)\n \n label_pick['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('label_pick')\n plt.ylabel('avg_trip_duration')\n plt.show()\n \n label_pick['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('label_drop')\n plt.ylabel('avg_trip_duration')\n plt.show()\n\ndef show_centroid():\n data = pd.read_csv('result.csv')\n centroid_pick_long = pd.DataFrame(data.groupby('centroid_pick_long')['trip_duration'].mean())\n centroid_pick_long.reset_index(inplace = True)\n centroid_pick_lat = pd.DataFrame(data.groupby('centroid_pick_lat')['trip_duration'].mean())\n centroid_pick_lat.reset_index(inplace = True)\n centroid_drop_long = pd.DataFrame(data.groupby('centroid_drop_long')['trip_duration'].mean())\n centroid_drop_long.reset_index(inplace = True)\n centroid_drop_lat = pd.DataFrame(data.groupby('centroid_drop_lat')['trip_duration'].mean())\n centroid_drop_lat.reset_index(inplace = True)\n \n centroid_pick_long['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('centroid_pick_long')\n plt.ylabel('avg_trip_duration')\n plt.show()\n \n 
centroid_pick_lat['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('centroid_pick_lat')\n plt.ylabel('avg_trip_duration')\n plt.show()\n \n centroid_drop_long['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('centroid_drop_long')\n plt.ylabel('avg_trip_duration')\n plt.show()\n \n centroid_drop_lat['trip_duration'].plot(kind = 'line', rot = 0)\n plt.xlabel('centroid_drop_lat')\n plt.ylabel('avg_trip_duration')\n plt.show()\n return\n\ndef show_hvsine_distance():\n train_cl = pd.read_csv('result.csv')\n \n hvsine_pick_cent_p = train_cl.loc[(train_cl.hvsine_pick_cent_p < 60)]\n sns.regplot(x = 'hvsine_pick_cent_p', y = 'trip_duration', data = hvsine_pick_cent_p)\n plt.show()\n hvsine_drop_cent_d = train_cl.loc[(train_cl.hvsine_drop_cent_d < 100)]\n sns.regplot(x = 'hvsine_drop_cent_d', y = 'trip_duration', data = hvsine_drop_cent_d)\n plt.show()\n hvsine_cent_p_cent_d = train_cl.loc[(train_cl.hvsine_cent_p_cent_d < 50)]\n sns.regplot(x = 'hvsine_cent_p_cent_d', y = 'trip_duration', data = hvsine_cent_p_cent_d)\n plt.show() \n return\n\ndef show_manhtn_distance():\n train_cl = pd.read_csv('result.csv')\n \n manhtn_pick_cent_p = train_cl.loc[(train_cl.manhtn_pick_cent_p < 60)]\n sns.regplot(x = 'manhtn_pick_cent_p', y = 'trip_duration', data = manhtn_pick_cent_p)\n plt.show()\n manhtn_drop_cent_d = train_cl.loc[(train_cl.manhtn_drop_cent_d < 100)]\n sns.regplot(x = 'manhtn_drop_cent_d', y = 'trip_duration', data = manhtn_drop_cent_d)\n plt.show()\n manhtn_cent_p_cent_d = train_cl.loc[(train_cl.manhtn_cent_p_cent_d < 50)]\n sns.regplot(x = 'manhtn_cent_p_cent_d', y = 'trip_duration', data = manhtn_cent_p_cent_d)\n plt.show() \n \n \nplt.style.use({'figure.figsize':(12, 8)})\n#show_manhtn_distance()\n ", "sub_path": "nyc/JYK/visualizaition.py", "file_name": "visualizaition.py", "file_ext": "py", "file_size_in_byte": 4233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "seaborn.regplot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "seaborn.regplot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "seaborn.regplot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "seaborn.regplot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", 
"line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 88, "usage_type": "call"}, {"api_name": "seaborn.regplot", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "seaborn.regplot", "line_number": 94, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "seaborn.regplot", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 102, "usage_type": "call"}, {"api_name": "seaborn.regplot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "seaborn.regplot", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "seaborn.regplot", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 115, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "450226401", "text": "import os.path\nimport json\nfrom models.delegate_info import DelegateInfoStatus\n##\n# Notification filter. Filter delegates for notifications\n##\n\nDELEGATE_STATUS_LOG_FILE = \"delegateinfo.log\"\n## 30 minutes on telegram notifications\nTIME_TELEGRAM_NOTIFICATION = (1000*60)*60\n\n## Return the last config file stored\ndef read_last_log():\n if os.path.exists(DELEGATE_STATUS_LOG_FILE):\n logFile = open(DELEGATE_STATUS_LOG_FILE, 'r')\n fileContent = logFile.read()\n logFile.close()\n return json.loads(fileContent)\n\n else:\n return None\n\n## Write the file\ndef write_last_log(delegateList, currentTime):\n logFileJson = {}\n logFileJson['lastTime'] = currentTime\n\n for delegate in delegateList:\n logFileJson[delegateList[delegate]['name']] = 'Forging'\n\n logFile = open(DELEGATE_STATUS_LOG_FILE, 'w')\n logFile.write(json.dumps(logFileJson))\n logFile.close()\n\n\ndef checkTelegramNotification (timestamp, delegateName, currentStatus, lastLog):\n ## If there is no log, return true\n if lastLog is None:\n return True\n ## Log exists\n else:\n ## Status red or orange -> notify\n if currentStatus is DelegateInfoStatus.STATUS_NOT_FORGING or currentStatus is DelegateInfoStatus.STATUS_CYCLE_LOST:\n return True\n ## If 30 min elapsed after the last notification and the status is forging or not found, send another notification\n if (currentStatus is DelegateInfoStatus.STATUS_FORGING or currentStatus is DelegateInfoStatus.STATUS_NOT_FOUND) and timestamp > (lastLog['lastTime'] + TIME_TELEGRAM_NOTIFICATION):\n return True\n ## time elapsed < 30 min and status is forging or not found -> not notify again\n else:\n return False\n", "sub_path": "notification_filter.py", "file_name": "notification_filter.py", "file_ext": "py", "file_size_in_byte": 1754, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.path.exists", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 14, "usage_type": 
"name"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "models.delegate_info.DelegateInfoStatus.STATUS_NOT_FORGING", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.delegate_info.DelegateInfoStatus", "line_number": 43, "usage_type": "name"}, {"api_name": "models.delegate_info.DelegateInfoStatus.STATUS_CYCLE_LOST", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.delegate_info.DelegateInfoStatus.STATUS_FORGING", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.delegate_info.DelegateInfoStatus", "line_number": 46, "usage_type": "name"}, {"api_name": "models.delegate_info.DelegateInfoStatus.STATUS_NOT_FOUND", "line_number": 46, "usage_type": "attribute"}]} +{"seq_id": "299347589", "text": "# -*- coding: utf-8 -*-\n\nimport datetime\n\nfrom simpleapi import Namespace\n\nfrom models import Snippet\n\nclass PasteNamespace(Namespace):\n \n def new(self, content, title='', author='', expires=60*60):\n \n new_snippet = Snippet(\n title=title,\n author=author,\n content=content,\n expires=datetime.datetime.now() + datetime.timedelta(seconds=expires)\n )\n new_snippet.save()\n \n return new_snippet.get_absolute_url()\n new.published = True\n new.constraints = {'expires': int}", "sub_path": "dpaste/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 563, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "simpleapi.Namespace", "line_number": 9, "usage_type": "name"}, {"api_name": "models.Snippet", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "147295467", "text": "from PyPDF2 import PdfFileWriter, PdfFileReader\nimport io\nimport csv\n\nfrom reportlab.pdfgen import canvas\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\n\n# Adding custom fonts. 1st parm is the name of the font and 2nd is the path to the ttf font file.\npdfmetrics.registerFont(TTFont('Roboto', 'RobotoMono-Medium.ttf'))\npdfmetrics.registerFont(TTFont('RobotoL', 'RobotoMono-Light.ttf'))\npdfmetrics.registerFont(TTFont('RobotoB', 'RobotoMono-Bold.ttf'))\n\n\n# Function to return a pdf page with the parameters added into it.\ndef createpage(name):\n packet = io.BytesIO()\n can = canvas.Canvas(packet)\n \n \n #can.setFont('RobotoL', 17) # Setting the font and size of text.\n #can.drawString(300, 310, name) # Drawing a string onto the page. 
(x, y, string)\n\n # can.setFont('RobotoL', 48)\n # can.drawString(700, 350, name)\n #can.drawString(2110, 785, seat)\n #can.drawString(2110, 648, food)\n\n #can.setFont('RobotoL', 60)\n #can.drawString(1600, 648, seat)\n\n # =======================================================================================================\n # Code to centre a string between a starting and ending coordinates.\n\n can.setFont('RobotoB', 120)\n can.setFillColorRGB(85./255, 63./255, 153./255)\n\n # You'll have to determine the following values with the help of the helper file, get_pdf_coordinates.py\n start = 0\n end = 1560\n length_of_one_letter = 70 # Use some 'monospaced' font so that each letter will have the same length.\n y = 470\n\n mid = start + (end - start)/2\n half_string_size = (len(name)/2)*length_of_one_letter\n x = mid - half_string_size\n can.drawString(x, y, name)\n # =======================================================================================================\n \n\n can.save() # Save the canvas\n\n\n packet.seek(0)\n # Creating a pdf with just the canvas we just created.\n new_pdf = PdfFileReader(packet)\n\n # Read your existing PDF (ticket.pdf)\n existing_pdf = PdfFileReader(open(\"idcard____.pdf\", \"rb\"))\n # Add the canvas on the existing page\n page = existing_pdf.getPage(0)\n page2 = new_pdf.getPage(0)\n page.mergePage(page2)\n\n return page\n\n\ndef create_one(name):\n name = name.upper()\n output = PdfFileWriter()\n\n page = createpage(name)\n output.addPage(page) # Adding that page to the pdf.\n\n # Writing it to a file.\n outputStream = open(\"id_card/\"+name+\".pdf\", \"wb\")\n output.write(outputStream)\n outputStream.close()\n print('certificate generated for ' + name) \n\nif __name__==\"__main__\":\n\n '''\n with open('../newTransactions.csv', 'r') as f:\n reader = csv.reader(f, delimiter=',')\n names = [row[7] for row in reader]\n\n for name in names:\n create_one(name)'''\n create_one('arundhati') ", "sub_path": "cards/create_certificate.py", "file_name": "create_certificate.py", "file_ext": "py", "file_size_in_byte": 2921, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "reportlab.pdfbase.pdfmetrics.registerFont", "line_number": 10, "usage_type": "call"}, {"api_name": "reportlab.pdfbase.pdfmetrics", "line_number": 10, "usage_type": "name"}, {"api_name": "reportlab.pdfbase.ttfonts.TTFont", "line_number": 10, "usage_type": "call"}, {"api_name": "reportlab.pdfbase.pdfmetrics.registerFont", "line_number": 11, "usage_type": "call"}, {"api_name": "reportlab.pdfbase.pdfmetrics", "line_number": 11, "usage_type": "name"}, {"api_name": "reportlab.pdfbase.ttfonts.TTFont", "line_number": 11, "usage_type": "call"}, {"api_name": "reportlab.pdfbase.pdfmetrics.registerFont", "line_number": 12, "usage_type": "call"}, {"api_name": "reportlab.pdfbase.pdfmetrics", "line_number": 12, "usage_type": "name"}, {"api_name": "reportlab.pdfbase.ttfonts.TTFont", "line_number": 12, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 17, "usage_type": "call"}, {"api_name": "reportlab.pdfgen.canvas.Canvas", "line_number": 18, "usage_type": "call"}, {"api_name": "reportlab.pdfgen.canvas", "line_number": 18, "usage_type": "name"}, {"api_name": "PyPDF2.PdfFileReader", "line_number": 56, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileReader", "line_number": 59, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileWriter", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "574023987", "text": 
"#!/usr/env python\n\n# version 0.9\n# developed by Artur Zych\n# 06.2016, updated 10.2016\n\nimport time\nimport sys\nimport pyodbc\nimport os\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\n\nMAIN_WINDOW_TITLE = \"CBRE Time Tracker\"\nCOMPANY = 'CBRE'\nTOOL_NAME = 'Time Tracker'\nACTIVITY_SELECTION_CAPTION = 'Select an activity:'\n\nMENU_BUTTONS_STYLE = 'QPushButton {background-color: rgba(0, 125, 0, 0); color: #ffffff;}'\nMAIN_WINDOW_LABEL_STYLE = 'QLabel {color: #ffffff;}'\nSTOP_BUTTON_STYLE = 'QPushButton {border-style: outset; border-width: 1px; background-color: #860927;' \\\n 'border-color: #fdfdfd; color: rgb(252, 252, 252);}'\nSTART_BUTTON_STYLE = 'QPushButton {border-style: outset; border-width: 1px; background-color: #50c752;' \\\n 'border-color: #fdfdfd; color: rgb(252, 252, 252);}'\nINACTIVE_STYLE = 'QPushButton {border-style: outset; border-width: 1px; background-color: #959596;' \\\n 'border-color: #fdfdfd; color: rgb(252, 252, 252);}'\nACTIVITIES_STYLE = 'QComboBox {border-style: outset; border-width: 1px; ' \\\n 'border-color: rgb(80, 199, 82); color: rgb(80, 199, 82);}'\n\nBG = 'bg2.jpg'\nICO = '32x32.ico'\n\nSTART_TIME = ''\nSTOP_TIME = ''\nDATE = ''\n\nDB_PATH = r'''Y:\\12 Analyst\\HACKATHON\\MySQL2.accdb'''\nDB_CONN_STRING = \"DRIVER={Microsoft Access Driver (*.mdb, *.accdb)}; \" \\\n \"DBQ=%s; UID=%s;, autocommit=True\" % (DB_PATH, 'azych')\n\nGET_GROUP_ID = 'SELECT * FROM Users WHERE Name_Users=?'\nGET_ACTIVITY_ID = 'SELECT ID_Activities FROM Activities WHERE Name_Activities=?'\nGET_USER_ID = 'SELECT ID_Users FROM Users WHERE Name_Users=?'\nGET_CLIENT_NAMES = 'SELECT Name_Clients FROM Clients WHERE ID_Users=?'\nGET_ACTIVITIES = 'SELECT Name_Activities FROM Activities WHERE ID_Groups=?'\nGET_PROJECT_ID = 'SELECT ID_Projects FROM Project WHERE Name_Projects=?'\nGET_CLIENT_ID = 'SELECT ID_Clients FROM Clients WHERE Name_Clients=?'\n\n\ndef get_sql_single_data(key_value, sql_query):\n db_connection = pyodbc.connect(DB_CONN_STRING)\n db_cursor = db_connection.cursor()\n db_cursor.execute(sql_query, key_value)\n return_value = db_cursor.fetchall()[0][0]\n db_connection.close()\n return return_value\n\n\ndef get_sql_list_data(key_value, sql_query):\n db_connection = pyodbc.connect(DB_CONN_STRING)\n db_cursor = db_connection.cursor()\n db_cursor.execute(sql_query, key_value)\n return_list = [i[0] for i in db_cursor.fetchall()]\n db_connection.close()\n return return_list\n\n\nsaved_flag = True\n\n\nclass WindowLabel(QLabel):\n def __init__(self, window, text, style, fontsize, posX, posY, bold=False, act_label=False):\n super().__init__(text, window)\n self.setStyleSheet(style)\n if act_label:\n self.setMinimumSize(QSize(500, 20))\n if bold:\n self.setFont(QFont(\"\", fontsize, QFont.Bold, True))\n else:\n self.setFont(QFont(\"\", fontsize, True))\n self.move(posX, posY)\n\n\nclass WindowButton(QPushButton):\n def __init__(self, window, text, style, qsizeA, qsizeB, posX, posY, func, flat=False, fontsize=0):\n super().__init__(\"\", window)\n self.setText(text)\n self.setStyleSheet(style)\n self.resize(QSize(qsizeA, qsizeB))\n self.move(posX, posY)\n self.clicked.connect(func)\n if flat:\n self.setFlat(True)\n if fontsize:\n self.setFont(QFont(\"\", fontsize, QFont.Bold, True))\n\n\nclass Activities_list(QComboBox):\n def __init__(self, window, list, posX, posY, width):\n super().__init__(window)\n self.addItems(list)\n self.move(posX, posY)\n self.setFixedWidth(width)\n\n\nclass Activity():\n def __init__(self, start_time='', 
activity_name='', username='', end_time='', days_date='',\n client='', project_name=''):\n self.start_time = start_time\n self.end_time = end_time\n self.activity_name = activity_name\n self.username = username\n self.user_id = 0.0\n self.group_id = 0.0\n self.date = days_date\n self.full_start_date = ''\n self.full_end_date = ''\n self.project = ''\n self.client_id = 0.0\n self.project_name = project_name\n self.client = client\n\n def save(self):\n self.end_time = time.strftime(\"%H\" + \":\" + \"%M\" + \":\" + \"%S\")\n self.date = time.strftime('%d/%m/%Y')\n self.user_id = float(get_sql_single_data(self.username, GET_USER_ID))\n self.activity_name = float(get_sql_single_data(self.activity_name, GET_ACTIVITY_ID))\n self.group_id = float(get_sql_single_data(self.username, GET_GROUP_ID))\n self.full_start_date = str(self.date + ' ' + self.start_time)\n self.full_end_date = str(self.date + ' ' + self.end_time)\n self.project = str(get_sql_single_data(self.project_name, GET_PROJECT_ID))\n self.client_id = float(get_sql_single_data(self.client, GET_CLIENT_ID))\n\n db_connection = pyodbc.connect(DB_CONN_STRING)\n db_cursor = db_connection.cursor()\n db_cursor.execute('INSERT into User_Activities (ID_Users, ID_Activities, Start_UA, End_UA, '\n 'ID_Clients, Comments_UA, ID_Group, Project) VALUES (?, ?, ?, ?, ?, ?, ?, ?)',\n (self.user_id, self.activity_name, self.full_start_date, self.full_end_date,\n self.client_id, '', self.group_id, self.project))\n db_connection.commit()\n db_connection.close()\n\n\nclass ClockR(object):\n def __init__(self, label, totalTime):\n self.label = label\n self.totalTime = totalTime\n self.timer = QTimer(interval=1000)\n self.timer.timeout.connect(self.update_timer)\n\n def update_timer(self):\n self.totalTime += 1\n self.count()\n\n def start(self):\n self.timer.start()\n\n def count(self):\n self.label.setStyleSheet(MAIN_WINDOW_LABEL_STYLE)\n self.label.setText(time.strftime(\"%H\" + \":\" + \"%M\" + \":\" + \"%S\"))\n\n\ndef main():\n\n new_activity = Activity()\n\n\n def save_and_quit():\n global new_activity\n global saved_flag\n app_instance.quit()\n # if saved_flag:\n # app_instance.quit()\n # else:\n # new_activity.save()\n # app_instance.quit()\n\n\n def start_activity():\n global new_activity\n global saved_flag\n new_activity = Activity(start_time=time.strftime(\"%H\" + \":\" + \"%M\" + \":\" + \"%S\"),\n activity_name=activities_list.currentText(),\n username=os.getlogin(),\n client=clients_list.currentText(),\n project_name=project_list.currentText())\n start_button.setEnabled(False)\n start_button.setStyleSheet(INACTIVE_STYLE)\n stop_button.setEnabled(True)\n stop_button.setStyleSheet(STOP_BUTTON_STYLE)\n saved_flag = False\n current_activity_label.move(18, 245)\n current_activity_label.setText('\\tCurrent: {} | {} | '\n '{} started: {}'.format(\n new_activity.client, new_activity.project_name,\n new_activity.activity_name, new_activity.start_time))\n\n\n def stop_activity():\n global new_activity\n start_button.setEnabled(True)\n start_button.setStyleSheet(START_BUTTON_STYLE)\n stop_button.setEnabled(False)\n stop_button.setStyleSheet(INACTIVE_STYLE)\n new_activity.save()\n saved_flag = True\n current_activity_label.move(100, 245)\n current_activity_label.setText('Current: {0} | {0} | {0} started: {0}'.format('N/A'))\n\n\n group_id = str(get_sql_single_data(os.getlogin().lower(), GET_GROUP_ID))\n user_id = float(get_sql_single_data(os.getlogin().lower(), GET_USER_ID))\n data = get_sql_list_data(group_id, GET_ACTIVITIES)\n client_names = 
(get_sql_list_data(user_id, GET_CLIENT_NAMES))\n projects = ['proj ' + str(i) for i in range(1, 16)]\n\n\n app = QApplication(sys.argv)\n app_instance = QCoreApplication.instance()\n main_window = QWidget()\n palette = QPalette()\n\n\n palette.setBrush(QPalette.Background, QBrush(QPixmap(BG)))\n main_window.setPalette(palette)\n main_window.setWindowIcon(QIcon(ICO))\n main_window.setFixedSize(430, 350)\n main_window.setWindowTitle(MAIN_WINDOW_TITLE)\n main_window.setWindowFlags(Qt.FramelessWindowHint)\n main_window.manual_window = None\n\n\n label_company = WindowLabel(main_window, COMPANY, MAIN_WINDOW_LABEL_STYLE, 28, 150, 5, True)\n label_tool_name = WindowLabel(main_window, TOOL_NAME, MAIN_WINDOW_LABEL_STYLE, 20, 127, 45)\n client_label = WindowLabel(main_window, 'Client: ', MAIN_WINDOW_LABEL_STYLE, 10, 90, 144, False)\n proj_label = WindowLabel(main_window, 'Project: ', MAIN_WINDOW_LABEL_STYLE, 10, 90, 168, False)\n activity_label = WindowLabel(main_window, 'Activity: ', MAIN_WINDOW_LABEL_STYLE, 10, 90, 192, False)\n quit_button = WindowButton(main_window, '[X]', MENU_BUTTONS_STYLE,\n 18, 17, 2, 0, save_and_quit, True)\n minimize_button = WindowButton(main_window, '[_]', MENU_BUTTONS_STYLE,\n 18, 17, 412, 0, main_window.showMinimized, True)\n start_button = WindowButton(main_window, 'START', START_BUTTON_STYLE, 14, 30, 20, 285, start_activity, True)\n start_button.setFixedSize(120, 35)\n stop_button = WindowButton(main_window, 'STOP', STOP_BUTTON_STYLE, 14, 30, 290, 285, stop_activity, True)\n stop_button.setFixedSize(120, 35)\n stop_button.setEnabled(False)\n stop_button.setStyleSheet(INACTIVE_STYLE)\n activities_list = Activities_list(main_window, data, 20, 180, 120)\n activities_list.move(142, 190)\n clients_list = Activities_list(main_window, client_names, 20, 180, 120)\n clients_list.move(142, 142)\n project_list = Activities_list(main_window, projects, 20, 180, 120)\n project_list.move(142, 166)\n user_label = WindowLabel(main_window, '', MAIN_WINDOW_LABEL_STYLE, 10, 164, 222, act_label=True)\n user_label.setText('User: {}'.format(os.getlogin()))\n current_activity_label = WindowLabel(main_window, '', MAIN_WINDOW_LABEL_STYLE, 10, 100, 245, act_label=True)\n current_activity_label.setText('Current: {0} | {0} | {0} started: {0}'.format('N/A'))\n clock_label = WindowLabel(main_window, '', MAIN_WINDOW_LABEL_STYLE, 20, 150, 90)\n clock_label.setFixedWidth(150)\n\n\n rtm_clock = ClockR(clock_label, 0)\n rtm_clock.start()\n\n\n main_window.show()\n sys.exit(app.exec_())\n\n\nmain()\n", "sub_path": "PyQt_doodles/track.py", "file_name": "track.py", "file_ext": "py", "file_size_in_byte": 10622, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pyodbc.connect", "line_number": 53, "usage_type": "call"}, {"api_name": "pyodbc.connect", "line_number": 62, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 126, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 127, "usage_type": "call"}, {"api_name": "pyodbc.connect", "line_number": 136, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 162, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 184, "usage_type": "call"}, {"api_name": "os.getlogin", "line_number": 186, "usage_type": "call"}, {"api_name": "os.getlogin", "line_number": 213, "usage_type": "call"}, {"api_name": "os.getlogin", "line_number": 214, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 220, "usage_type": "attribute"}, 
{"api_name": "os.getlogin", "line_number": 257, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 269, "usage_type": "call"}]} +{"seq_id": "45742473", "text": "from django.shortcuts import render\nfrom django.views import generic\nfrom listings.models import Listing\nfrom realtors.models import Realtor\nfrom listings.choices import bedroom_choices, price_choices, province_choices\n\n\nclass HomeView(generic.ListView):\n model = Listing\n queryset = Listing.objects.all().order_by(\"-list_date\")[:3]\n template_name = \"index.html\"\n context_object_name = \"listings\"\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super(HomeView, self).get_context_data()\n context['bedroom_choices'] = bedroom_choices\n context['price_choices'] = price_choices\n context['province_choices'] = province_choices\n return context\n\n\ndef about(request):\n realtors = Realtor.objects.all()\n context = {\n \"realtors\": realtors\n }\n return render(request, template_name=\"about.html\", context=context)\n\n\ndef search(request):\n queryset_listing = Listing.objects.all().order_by(\"-list_date\")\n\n #keywords\n\n if \"keywords\" in request.GET:\n keywords = request.GET['keywords']\n if keywords:\n queryset_listing = queryset_listing.filter(description__icontains=keywords)\n\n #city\n\n if \"city\" in request.GET:\n city = request.GET['city']\n if city:\n queryset_listing = queryset_listing.filter(city__iexact=city)\n\n #Province\n\n if \"province\" in request.GET:\n province = request.GET['province']\n if province:\n queryset_listing = queryset_listing.filter(province__iexact=province)\n\n # Bedrooms\n if \"bedrooms\" in request.GET:\n bedrooms = request.GET['bedrooms']\n if bedrooms:\n queryset_listing = queryset_listing.filter(bedrooms__lte=bedrooms)\n\n # Price\n if \"price\" in request.GET:\n price = request.GET['price']\n if price:\n queryset_listing = queryset_listing.filter(price__lte=price)\n\n context = {\n \"bedroom_choices\": bedroom_choices,\n \"price_choices\": price_choices,\n \"province_choices\": province_choices,\n \"listings\": queryset_listing,\n \"values\": request.GET\n }\n return render(request, template_name=\"search.html\", context=context)\n", "sub_path": "pages/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.views.generic.ListView", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 8, "usage_type": "name"}, {"api_name": "listings.models.Listing", "line_number": 9, "usage_type": "name"}, {"api_name": "listings.models.Listing.objects.all", "line_number": 10, "usage_type": "call"}, {"api_name": "listings.models.Listing.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "listings.models.Listing", "line_number": 10, "usage_type": "name"}, {"api_name": "listings.choices.bedroom_choices", "line_number": 16, "usage_type": "name"}, {"api_name": "listings.choices.price_choices", "line_number": 17, "usage_type": "name"}, {"api_name": "listings.choices.province_choices", "line_number": 18, "usage_type": "name"}, {"api_name": "realtors.models", "line_number": 23, "usage_type": "name"}, {"api_name": "realtors.models.Realtor.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "realtors.models.Realtor.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "realtors.models.Realtor", "line_number": 23, 
"usage_type": "name"}, {"api_name": "realtors.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "listings.models.Listing.objects.all", "line_number": 31, "usage_type": "call"}, {"api_name": "listings.models.Listing.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "listings.models.Listing", "line_number": 31, "usage_type": "name"}, {"api_name": "listings.choices.bedroom_choices", "line_number": 67, "usage_type": "name"}, {"api_name": "listings.choices.price_choices", "line_number": 68, "usage_type": "name"}, {"api_name": "listings.choices.province_choices", "line_number": 69, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "357829084", "text": "import threading, time, pandas, sqlite3, platform\nfrom concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor,as_completed\nfrom functools import wraps\n\ndb_lock = threading.Lock()\n\nif platform.system() == \"Windows\":\n database_name = \"D:/shopee.db\"\n driver_path = 'D:/chromedriver_win32/chromedriver.exe'\nelse:\n database_name = \"/root/shopee.db\"\n driver_path = \"/root/chromedriver.exe\"\n\ndef mydb(sql, values=(), many=False):\n with sqlite3.connect(database_name) as db:\n if 'select' in sql:\n cur = db.execute(sql, values)\n rv = cur.fetchall()\n else:\n with db_lock:\n if many:\n db.executemany(sql, values)\n else:\n db.execute(sql, values)\n db.commit()\n rv = None\n return rv\n\ndef snow(tsp=None):\n tsp = int(tsp) if tsp else None\n t = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(tsp))\n return t\n\ndef unsnow(s):\n st = time.strptime(s,\"%Y-%m-%d %H:%M:%S\")\n tp = int(time.mktime(st))\n return tp\n\ndef data2book(data, name):\n path = './static/{}.xlsx'.format(name)\n book = pandas.ExcelWriter(path)\n df = pandas.DataFrame(data)\n df.to_excel(book, sheet_name='Sheet1', index=False, header=False)\n book.save()\n return path\n\n#新线程伪异步装饰器\ndef decor_async(func):\n @wraps(func)\n def wrapped_function(*args, **kwargs):\n print('one new thread started for ', func.__name__)\n mission = threading.Thread(target=func, args=args, kwargs=kwargs)\n mission.start()\n return wrapped_function\n\n#失败重试装饰器\ndef decor_retry(func):\n @wraps(func)\n def wrapped_function(*args, **kwargs):\n try:\n result = func(*args, **kwargs)\n except:\n print(func.__name__, \" failed , try again later\")\n time.sleep(5)\n result = func(*args, **kwargs)\n return result\n return wrapped_function\n\n#多任务并发, 线程版\ndef multiple_mission(func, args_list, max_number=16):\n num = len(args_list)\n print('total mission number is ', num)\n for i in range(num):\n args = args_list[i]\n while threading.active_count() > max_number + 1:\n print('reach max mission number, waiting...')\n time.sleep(1)\n mission = threading.Thread(target=func, args = args)\n mission.start()\n print('start mission NO.', i)\n return\n\n#多任务并发, 线程版, 加线程池\ndef multiple_mission_pool(func, args_list, max_workers=16, debug=False):\n if debug:\n arg = args_list[0]\n func(*arg)\n return \n count, num = 0, len(args_list)\n print('total mission number is ', num)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n future_list = [executor.submit(func, *args) for args in args_list]\n # for future in as_completed(future_list):\n # result = future.result()\n # count += 1\n # rate = round(count/num, 2)\n # msg = 'total mission {}, completed {}, {}%'.format(num, count, rate)\n print('all missions 
done')\n\n# #多任务并发,协程版,慢\n# def multiple_mission_gevent(func, args_list, max_workers=32):\n# num = len(args_list)\n# print('total mission number is ', num)\n# jobs = [gevent.spawn(func, args) for args in args_list]\n# gevent.wait(jobs)\n# print('all missions done')\n# return", "sub_path": "api_tools.py", "file_name": "api_tools.py", "file_ext": "py", "file_size_in_byte": 3438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "threading.Lock", "line_number": 5, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 15, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 31, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 31, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 35, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.ExcelWriter", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 42, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 52, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 49, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 58, "usage_type": "call"}, {"api_name": "threading.active_count", "line_number": 75, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 77, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 78, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "1793555", "text": "from django.db import models\nfrom safedelete import SOFT_DELETE_CASCADE, SOFT_DELETE\nfrom safedelete.models import SafeDeleteModel\nfrom safedelete.tests.models import Article, Author, Category\n\nfrom unittest.mock import patch\nimport pytest\npytestmark = pytest.mark.django_db\n\nclass Press(SafeDeleteModel):\n name = models.CharField(max_length=200)\n article = models.ForeignKey(Article, on_delete=models.CASCADE)\n\n\nclass PressNormalModel(models.Model):\n name = models.CharField(max_length=200)\n article = models.ForeignKey(Article, on_delete=models.CASCADE)\n\n\nclass CustomAbstractModel(SafeDeleteModel):\n\n class Meta:\n abstract = True\n\n\nclass ArticleView(CustomAbstractModel):\n _safedelete_policy = SOFT_DELETE_CASCADE\n\n article = models.ForeignKey(Article, on_delete=models.CASCADE)\n\n@pytest.fixture()\ndef authors():\n return (\n Author.objects.create(),\n Author.objects.create(),\n Author.objects.create(),\n )\n\n@pytest.fixture()\ndef categories():\n return (\n Category.objects.create(name='category 0'),\n Category.objects.create(name='category 1'),\n Category.objects.create(name='category 2'),\n )\n\n@pytest.fixture()\ndef articles(authors,categories):\n return (\n Article.objects.create(author=authors[1]),\n Article.objects.create(author=authors[1], category=categories[1]),\n Article.objects.create(author=authors[2], category=categories[2]),\n )\n\n@pytest.fixture()\ndef press(articles):\n return Press.objects.create(name='press 0', article=articles[2])\n\n\ndef test_soft_delete_cascade(authors,categories,articles,press):\n assert Author.objects.count()== 3\n assert Article.objects.count() == 3\n assert Category.objects.count() == 3\n assert Press.objects.count() == 1\n\n 
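# Descriptive note (added): the delete below exercises SOFT_DELETE_CASCADE.
# Soft-deleting authors[2] should cascade to articles[2] and the press row
# that points at it, while every row stays recoverable through the
# all_objects managers, as the asserts that follow check.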
authors[2].delete(force_policy=SOFT_DELETE_CASCADE)\n\n assert Author.objects.count() == 2\n assert Author.all_objects.count() == 3\n assert Article.objects.count() == 2\n assert Article.all_objects.count() == 3\n assert Press.objects.count() == 0\n assert Press.all_objects.count() == 1\n\n\ndef test_soft_delete_cascade_with_normal_model(authors,categories,articles,press):\n PressNormalModel.objects.create(name='press 0', article=articles[2])\n authors[2].delete(force_policy=SOFT_DELETE_CASCADE)\n\n assert PressNormalModel.objects.count() == 1\n\n assert Author.objects.count() == 2\n assert Author.all_objects.count() == 3\n assert Article.objects.count() == 2\n assert Article.all_objects.count() == 3\n assert Press.objects.count() == 0\n assert Press.all_objects.count() == 1\n\n\ndef test_soft_delete_cascade_with_abstract_model(authors,categories,articles,press):\n ArticleView.objects.create(article=articles[2])\n\n articles[2].delete(force_policy=SOFT_DELETE_CASCADE)\n\n assert Article.objects.count() == 2\n assert Article.all_objects.count() == 3\n\n assert ArticleView.objects.count() == 0\n assert ArticleView.all_objects.count() == 1\n\ndef test_soft_delete_cascade_deleted(authors,categories,articles,press):\n articles[0].delete(force_policy=SOFT_DELETE)\n assert authors[1].article_set.count() == 1\n\n with patch('safedelete.tests.models.Article.delete') as delete_article_mock:\n authors[1].delete(force_policy=SOFT_DELETE_CASCADE)\n # delete_article_mock.assert_called_once doesn't work on py35\n assert delete_article_mock.call_count == 1\n\n\ndef test_undelete_with_soft_delete_cascade_policy(authors,categories,articles,press):\n authors[2].delete(force_policy=SOFT_DELETE_CASCADE)\n authors[2].undelete(force_policy=SOFT_DELETE_CASCADE)\n\n assert Author.objects.count() == 3\n assert Article.objects.count() == 3\n assert Category.objects.count() == 3\n assert Press.objects.count() == 1\n", "sub_path": "safedelete/tests/test_soft_delete_cascade.py", "file_name": "test_soft_delete_cascade.py", "file_ext": "py", "file_size_in_byte": 3750, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pytest.mark", "line_number": 8, "usage_type": "attribute"}, {"api_name": "safedelete.models.SafeDeleteModel", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 12, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 17, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 17, "usage_type": "attribute"}, {"api_name": 
"safedelete.models.SafeDeleteModel", "line_number": 20, "usage_type": "name"}, {"api_name": "safedelete.SOFT_DELETE_CASCADE", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 29, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article", "line_number": 29, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 29, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Author.objects.create", "line_number": 34, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Author.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Author", "line_number": 34, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Author.objects.create", "line_number": 35, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Author.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Author", "line_number": 35, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Author.objects.create", "line_number": 36, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Author.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Author", "line_number": 36, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 31, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Category.objects.create", "line_number": 42, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Category.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Category", "line_number": 42, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Category.objects.create", "line_number": 43, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Category.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Category", "line_number": 43, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Category.objects.create", "line_number": 44, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Category.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Category", "line_number": 44, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 39, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.objects.create", "line_number": 50, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Article", "line_number": 50, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Article.objects.create", "line_number": 51, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Article", "line_number": 51, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Article.objects.create", "line_number": 52, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Article", "line_number": 52, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 47, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 55, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Author.objects.count", 
"line_number": 61, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Author.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Author", "line_number": 61, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Article.objects.count", "line_number": 62, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Article", "line_number": 62, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Category.objects.count", "line_number": 63, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Category.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Category", "line_number": 63, "usage_type": "name"}, {"api_name": "safedelete.SOFT_DELETE_CASCADE", "line_number": 66, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Author.objects.count", "line_number": 68, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Author.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Author", "line_number": 68, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Author.all_objects.count", "line_number": 69, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Author.all_objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Author", "line_number": 69, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Article.objects.count", "line_number": 70, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Article", "line_number": 70, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Article.all_objects.count", "line_number": 71, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.all_objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Article", "line_number": 71, "usage_type": "name"}, {"api_name": "safedelete.SOFT_DELETE_CASCADE", "line_number": 78, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Author.objects.count", "line_number": 82, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Author.objects", "line_number": 82, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Author", "line_number": 82, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Author.all_objects.count", "line_number": 83, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Author.all_objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Author", "line_number": 83, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Article.objects.count", "line_number": 84, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Article", "line_number": 84, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Article.all_objects.count", "line_number": 85, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.all_objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Article", "line_number": 85, "usage_type": "name"}, {"api_name": "safedelete.SOFT_DELETE_CASCADE", "line_number": 93, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Article.objects.count", 
"line_number": 95, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Article", "line_number": 95, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Article.all_objects.count", "line_number": 96, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.all_objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Article", "line_number": 96, "usage_type": "name"}, {"api_name": "safedelete.SOFT_DELETE", "line_number": 102, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 105, "usage_type": "call"}, {"api_name": "safedelete.SOFT_DELETE_CASCADE", "line_number": 106, "usage_type": "name"}, {"api_name": "safedelete.SOFT_DELETE_CASCADE", "line_number": 112, "usage_type": "name"}, {"api_name": "safedelete.SOFT_DELETE_CASCADE", "line_number": 113, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Author.objects.count", "line_number": 115, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Author.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Author", "line_number": 115, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Article.objects.count", "line_number": 116, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Article.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Article", "line_number": 116, "usage_type": "name"}, {"api_name": "safedelete.tests.models.Category.objects.count", "line_number": 117, "usage_type": "call"}, {"api_name": "safedelete.tests.models.Category.objects", "line_number": 117, "usage_type": "attribute"}, {"api_name": "safedelete.tests.models.Category", "line_number": 117, "usage_type": "name"}]} +{"seq_id": "587835414", "text": "import pandas as pd\r\nimport os\r\nfrom sklearn import preprocessing\r\nfrom collections import deque\r\nimport random\r\nimport numpy as np\r\nimport time\r\n#import tensorflow as tf\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, LSTM, CuDNNLSTM, BatchNormalization\r\nfrom keras.callbacks import TensorBoard, ModelCheckpoint\r\nfrom keras import optimizers\r\nimport matplotlib.pyplot as plt\r\n\r\nSEQ_LEN = 7\r\nFUTURE_PERIOD_PREDICT = 3\r\nRATIO_TO_PREDICT = \"XOMX30\"\r\nEPOCHS = 5\r\nBATCH_SIZE = 64\r\nNAME = f\"{SEQ_LEN}-SEQ-{FUTURE_PERIOD_PREDICT}-PRED-{int(time.time())}\"\r\n\r\ndef classify (current, future):\r\n if float(future) > float(current):\r\n return 1\r\n else:\r\n return 0\r\n\r\ndef preprocess_df(df):\r\n df = df.drop(\"target\", 1)\r\n df.dropna(inplace=True)\r\n #\r\n # for col in df.columns:\r\n # if col != \"future\":\r\n # df[col] = df[col].pct_change(fill_method='ffill') #normalizing the data\r\n #\r\n # df[col] = preprocessing.scale(df[col].values) #scaling the data\r\n #\r\n # df.dropna(inplace=True)\r\n\r\n sequential_data = []\r\n prev_days = deque(maxlen=SEQ_LEN)\r\n for i in df.values:\r\n prev_days.append([n for n in i[:-1]])\r\n if len(prev_days) == SEQ_LEN:\r\n sequential_data.append([np.array(prev_days), i[-1]])\r\n\r\n random.shuffle(sequential_data)\r\n\r\n buys = []\r\n sells = []\r\n\r\n for seq, target in sequential_data:\r\n if target == 0:\r\n sells.append([seq, target])\r\n elif target == 1:\r\n buys.append([seq, target])\r\n\r\n random.shuffle(buys)\r\n random.shuffle(sells)\r\n\r\n lower = min(len(buys), len(sells))\r\n\r\n buys = 
buys[:lower]\r\n sells = sells[:lower]\r\n\r\n sequential_data = buys+sells\r\n\r\n random.shuffle(sequential_data)\r\n\r\n X = []\r\n y = []\r\n\r\n for seq, target in sequential_data:\r\n X.append(seq)\r\n y.append(target)\r\n\r\n return np.array(X), y\r\n\r\n\r\ndf = pd.read_csv(f\"G:\\Programming\\Projects\\Index_price_movement\\All_Stock_Data\\{RATIO_TO_PREDICT}.csv\")\r\ndf.rename(columns = {\"Adj Close\": \"AdjClose\"}, inplace=True)\r\ndf[\"Date\"] = pd.to_datetime(df[\"Date\"], format=\"%Y-%m-%d\")\r\ndf.set_index(\"Date\", inplace = True)\r\ndf = df[[\"AdjClose\"]]\r\n\r\n# df.plot()\r\n# plt.show()\r\n\r\n# df['future'] = df[\"AdjClose\"].shift(-FUTURE_PERIOD_PREDICT)\r\n#\r\n# df['target'] = list(map(classify, df[\"AdjClose\"], df[\"future\"]))\r\n#\r\n#\r\n# main_df = df\r\n#\r\n# times = sorted(main_df.index.values)\r\n# last_10pct = times[-int(0.10*len(times))]\r\n#\r\n# validation_main_df = main_df[main_df.index >= last_10pct]\r\n# main_df = main_df[main_df.index < last_10pct]\r\n#\r\n# train_x, train_y = preprocess_df(main_df)\r\n#\r\n# validation_x, validation_y = preprocess_df(validation_main_df)\r\n#\r\n# print (f\"train_data: {len(train_x)} validation: {len(validation_x)}\")\r\n# print (f\"Dont buys: {train_y.count(0)}, buys: {train_y.count(1)}\")\r\n# print (f\"VALIDATION Dont buys: {validation_y.count(0)}, buys: {validation_y.count(1)}\")\r\n#\r\n# #define model\r\n# model = Sequential()\r\n# model.add(LSTM(50, activation='relu', input_shape=(SEQ_LEN, FUTURE_PERIOD_PREDICT)))\r\n# model.add(Dense(1))\r\n# model.compile(optimizer='adam', loss='mse')\r\n#\r\n# #fit model\r\n# model.fit(train_x, train_y, epochs=EPOCHS, verbose=1)\r\n#\r\n# # demonstrate prediction\r\n# tboard = TensorBoard(log_dir=f\"new_logs/{NAME}\")\r\n#\r\n# filepath = \"new_models/RNN_Final-{epoch:02d}-{val_acc}.hdf5\"\r\n# checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max') # saves only the best ones\r\n# callbacks_list = [tboard, checkpoint]\r\n#\r\n# history = model.fit(train_x, train_y, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(validation_x, validation_y), callbacks=callbacks_list, verbose=1)\r\n#\r\n# #history = model.fit(train_x, train_y, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(validation_x, validation_y), verbose=1)\r\n#\r\n# #history = model.fit(train_x, train_y, batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1)\r\n\r\n", "sub_path": "univariate_analysis.py", "file_name": "univariate_analysis.py", "file_ext": "py", "file_size_in_byte": 3967, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "time.time", "line_number": 20, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 47, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 58, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 59, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "387022351", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.http import Request\nimport re\nfrom datetime import datetime\nfrom 
newscrawl.items import newsItem\nimport time\n\n\nclass HeYuanDailySpider(scrapy.Spider):\n name = \"heyuandaily\"\n allowed_domains = [\"heyuan.cn\"]\n base_url = \"http://epaper.heyuan.cn/\"\n newspapers = \"河源日报\"\n today = datetime.today()\n\n def start_requests(self):\n date = self.today\n sdate = date.strftime('%Y-%m/%d')\n url = self.base_url + 'html/%s/node_1.htm' % sdate\n yield Request(url, self.parse)\n\n def parse(self, response):\n pages = response.xpath('//*[@id=\"bmdhTable\"]/tbody/tr/td[1]/a')\n #re-crawl if the current page has no data yet\n if response.status == 404 or not pages:\n time.sleep(1800) #wait 30 minutes\n yield Request(response.url, self.parse, dont_filter=True) #dont_filter=True so the repeated URL is not filtered out\n for page in pages:\n list_category = page.xpath('text()').extract()\n str_category = \"\".join(list_category)\n category = str_category.split(':')[1]\n page_path = page.xpath('@href').extract()[0]\n page_index = re.findall('\d{1,}', page_path)\n url = re.sub('(?<=_)\d{1,}',page_index[0],response.url)\n yield Request(url, self.page_parse, dont_filter=True, meta={'category':category})\n\n def page_parse(self, response):\n articles = response.xpath('//*[@id=\"main-ed-articlenav-list\"]/table/tbody/tr/td[2]/div/a/@href').extract()\n category = response.meta['category']\n for article in articles:\n url = re.sub('(node_\d{1,}\.htm)',article,response.url)\n yield Request(url, self.article_parse, meta={'category':category})\n\n def article_parse(self, response):\n list_title = response.xpath('//p[@class=\"BSHARE_TEXT\"]/text()').extract()\n title = \"\".join(list_title)\n list_page = response.xpath('//*[@id=\"currentBM\"]/strong/text()').extract()\n str_page = \"\".join(list_page)\n page = re.findall('\d{1,}', str_page)[0]\n list_content = response.xpath('//*[@id=\"ozoom\"]/founder-content/text()|//*[@id=\"ozoom\"]/founder-content/p/text()').extract()\n content = \"\".join(list_content)\n list_date = re.findall('(?<=/)\d{1,}-\d{1,}/\d{1,}(?=/)', response.url)\n str_date = \"\".join(list_date)\n date = str_date.replace('/', '-')\n category = response.meta['category']\n if content == \"\":\n pass\n else:\n item = newsItem()\n item['title'] = title\n item['page'] = page\n item['content'] = content\n item['date'] = date\n item['category'] = category\n item['url'] = response.url\n item['newspapers'] = self.newspapers\n yield item\n", "sub_path": "newscrawl/spiders/heyuandaily.py", "file_name": "heyuandaily.py", "file_ext": "py", "file_size_in_byte": 2836, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "scrapy.Spider", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "scrapy.http.Request", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 28, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 34, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 35, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 36, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 42, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 43, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 50, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 53, "usage_type": "call"}, {"api_name":
"newscrawl.items.newsItem", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "351870590", "text": "from pymongo import MongoClient\r\nfrom pymongo.errors import ConnectionFailure\r\n\r\nprint(\"Establishing connection with the database\".center(100, \"=\"))\r\nmyclient = MongoClient(\"mongodb://%s:%s@127.0.0.1\" % (\"myUserAdmin\", \"abc123\"))\r\nprint(\"Connection established successfully: \", myclient)\r\n\r\nmydatabase = myclient['database']\r\n\r\nmycollection = mydatabase['test3']\r\n\r\nmycollection.delete_many({})\r\n\r\nsample_data = [{\"x\": 1, \"tags\": [\"dog\", \"cat\"]},\r\n {\"x\": 2, \"tags\": [\"cat\"]},\r\n {\"x\": 2, \"tags\": [\"mouse\", \"cat\", \"dog\"]},\r\n {\"x\": 3, \"tags\": []}]\r\n\r\nresult = mycollection.insert_many(sample_data)\r\n\r\nfrom bson.son import SON\r\n\r\npipeline = [\r\n { \"$unwind\": \"$tags\" }, #1. Make a flat hierarchy\r\n { \"$group\": { \"_id\": \"$tags\", \"count\": { \"$sum\": 1 } } }, #2. Actual Aggregation\r\n { \"$sort\": SON( [(\"count\", -1) , (\"_id\", -1) ] ) } #3. Display\r\n]\r\n\r\nimport pprint\r\nprint(\"Aggregation Pipeline\".center(100, \"=\"))\r\npprint.pprint(list(mycollection.aggregate(pipeline)))\r\nprint(\"\".center(100, \"=\"))\r\n\r\n\r\n", "sub_path": "prime-agg.py", "file_name": "prime-agg.py", "file_ext": "py", "file_size_in_byte": 1046, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pymongo.MongoClient", "line_number": 5, "usage_type": "call"}, {"api_name": "bson.son.SON", "line_number": 26, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "651196149", "text": "import scrapy\nfrom scrapy import Selector\nfrom spiders.items import MaoyanItem\n\n\nclass MaoyanSpider(scrapy.Spider):\n name = 'maoyan'\n allowed_domains = ['maoyan.com']\n start_urls = ['http://maoyan.com/']\n\n # 注释默认的parse函数\n # def parse(self, response):\n # pass\n\n\n # 爬虫启动时,引擎自动调用该方法,并且只会被调用一次,用于生成初始的请求对象(Request)。\n # start_requests()方法读取start_urls列表中的URL并生成Request对象,发送给引擎。\n # 引擎再指挥其他组件向网站服务器发送请求,下载网页\n def start_requests(self):\n url = 'https://maoyan.com/films?showType=3'\n yield scrapy.Request(url=url, callback=self.parse)\n # url 请求访问的网址\n # callback 回调函数,引擎回将下载好的页面(Response对象)发给该方法,执行数据解析\n # 这里可以使用callback指定新的函数,不是用parse作为默认的回调参数\n # 解析函数\n def parse(self, response):\n movies = Selector(response=response).xpath('//div[@class=\"movie-hover-info\"]')[:11]\n print(movies)\n for movie in movies:\n item = MaoyanItem()\n name = movie.xpath('./div[1]/span[1]/text()')\n type = movie.xpath('./div[2]/text()')[-1]\n date = movie.xpath('./div[4]/text()')[-1]\n item['name'] = name.extract_first().strip()\n item['type'] = type.extract().strip()\n item['date'] = date.extract().strip()\n yield item\n", "sub_path": "week02/spiders/spiders/spiders/maoyan.py", "file_name": "maoyan.py", "file_ext": "py", "file_size_in_byte": 1599, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "scrapy.Spider", "line_number": 6, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 21, "usage_type": "call"}, {"api_name": "scrapy.Selector", "line_number": 27, "usage_type": "call"}, {"api_name": "spiders.items.MaoyanItem", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "95293419", "text": "import datetime\nfrom dateutil.relativedelta import *\nimport time\nimport logging\n\nfrom django.shortcuts import render_to_response, 
get_object_or_404, get_list_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.auth.models import User\nfrom django.template import RequestContext\nfrom django.utils.dates import MONTHS_3, MONTHS_3_REV\nfrom django.utils.encoding import force_unicode\n\nfrom calloway.models import Calendar, Event\nfrom calloway.forms import CalendarForm, EventForm\n\ndef get_monday():\n \"\"\"\n Function to return the most useful Monday.\n During the weekend return next Monday, otherwise return this week's Monday.\n \"\"\"\n now = datetime.datetime.now()\n # Get next Monday date\n monday = now+relativedelta(weekday=MO)\n # If before Saturday, use previous Monday (-1 week)\n if now.weekday() < 5:\n monday = monday+relativedelta(weeks=-1)\n return monday\n\ndef add_css_positioning_to_events(events):\n minutes_in_day = float(1440) # 60 * 24\n \n def calculate_horizontal_position(j):\n if hasattr(events[j], 'horizontal_position_percentage'):\n #logging.debug('already positioned')\n return\n else:\n k = j\n l = events[j].total_number_of_horizontal_events\n #logging.debug('total events ' + str(len(events)))\n #logging.debug('clashing events ' + str(l))\n m = 0\n width = float(1) / float(l) * 100\n #logging.debug('the width of ' + events[k].summary + ' is ' + str(width))\n while m < l and k < len(events):\n #logging.debug('k is ' + str(k))\n events[k].horizontal_position_percentage = float(m) / float(l) * 100\n events[k].width_percentage = width\n k = k + 1\n m = m + 1 \n #logging.debug('yep')\n \n def calculate_start_and_end_points(j):\n # Calculate start and end points\n event = events[j]\n start = (event.dtstart.hour * 60) + event.dtstart.minute\n end = (event.dtend.hour * 60) + event.dtend.minute\n event.dtstart_pixels = start\n event.dtend_pixels = end\n #event.dtstart_percentage = (start / minutes_in_day) * 1440\n #event.dtend_percentage = (end / minutes_in_day) * 1440\n duration = event.dtend_pixels - event.dtstart_pixels\n if duration > 10: # Events shorter than 10 minutes ( 10 pixels ) are clamped below\n event.duration_time_pixels = event.dtend_pixels - event.dtstart_pixels\n else:\n event.duration_time_pixels = 10\n \n i = 0\n for event in events:\n calculate_start_and_end_points(i)\n \n # Calculate clashes manually, to avoid multiple db queries.\n event.total_number_of_horizontal_events = 0\n for e in events:\n if e.dtstart < event.dtend and e.dtend > event.dtstart:\n event.total_number_of_horizontal_events = event.total_number_of_horizontal_events + 1\n \n #event.total_number_of_horizontal_events = events.filter(dtstart__lt=event.dtend, dtend__gt=event.dtstart).count()\n calculate_horizontal_position(i)\n \n i = i + 1\n \n return events\n\ndef redirect(post_save_redirect, obj):\n if post_save_redirect:\n return HttpResponseRedirect(post_save_redirect % obj.__dict__)\n elif hasattr(obj, 'get_absolute_url'):\n return HttpResponseRedirect(obj.get_absolute_url())\n else:\n raise ImproperlyConfigured(\n \"No URL to redirect to. 
Either pass a post_save_redirect\"\n \" parameter to the generic view or define a get_absolute_url\"\n \" method on the Model.\")\n\n@login_required\ndef event_detail(request, owner_username, calendar_slug, event_id, template='calloway/event_detail.html'):\n try:\n event = Event.objects.select_related().get(pk=event_id)\n except Event.DoesNotExist:\n raise Http404()\n event.primary_calendar = event.calendars.filter(slug=event.primary_calendar_slug, owner=event.owner)[0]\n if event.owner.username != owner_username:\n raise Http404()\n if event.primary_calendar.slug != calendar_slug:\n raise Http404()\n \n context = {\n 'event': event\n }\n \n if 'output' in request.GET and request.GET['output'] == 'json':\n from django.core import serializers\n json = serializers.serialize('json', [event])\n return HttpResponse(json, mimetype='text/javascript')\n \n return render_to_response(template, context,\n context_instance=RequestContext(request))\n \n@login_required \ndef create_event(request, owner_username, calendar_slug, template='calloway/create_event.html', post_save_redirect=None):\n if request.method == 'POST':\n from django.template.defaultfilters import slugify\n form = EventForm(request.POST)\n if form.is_valid():\n event = form.save(commit=False)\n event.calendar = get_object_or_404(Calendar, owner=request.user, slug='lessons')\n event.confirmed = False\n event.save()\n \n # ManyToMany relationship can only be updated once the instance is saved.\n # http://docs.djangoproject.com/en/dev/topics/forms/modelforms/#the-save-method\n event.users_with_read_access.add(request.user)\n form.save_m2m()\n \n return redirect(post_save_redirect, event)\n \n else:\n form = EventForm()\n context = {\n 'form': form\n }\n\n return render_to_response(template, context,\n context_instance=RequestContext(request))\n\ndef calendar_year(request, owner_username, calendar_slug, year, template='calloway/calendar_year.html'):\n \n return render_to_response(template, { 'year': year },\n context_instance=RequestContext(request))\n\n@login_required\ndef calendar_month(request, owner_username, calendar_slug, year, month, template='calloway/calendar_month.html'):\n # Get calendar by name for that user. 
Will only return calendars for the logged in username.\n owner = get_object_or_404(User, username=owner_username)\n calendar = get_object_or_404(Calendar, slug=calendar_slug, owner=owner)\n \n # Do some date work.\n month = int(MONTHS_3_REV[month])\n year = int(year)\n date = datetime.datetime(year, month, 1)\n next_month = date + relativedelta(months=+1)\n previous_month = date + relativedelta(months=-1)\n \n # Get events for that month (Could this be done in the template tag?)\n events = calendar.get_events_for_month(date)\n \n # Get availability_blocks for the month\n availability_blocks = calendar.get_availability_for_month(date)\n \n context = {\n 'calendar': calendar,\n 'year': date.year,\n 'month': date.month,\n 'date': date,\n 'next_month':next_month,\n 'next_month_url': \"/calendars/%s/%s/%s/%s/\" % (owner, calendar.slug, next_month.year, force_unicode(MONTHS_3[next_month.month])),\n 'previous_month': previous_month,\n 'previous_month_url': \"/calendars/%s/%s/%s/%s/\" % (owner, calendar.slug, previous_month.year, force_unicode(MONTHS_3[previous_month.month])),\n 'events': events,\n 'availability_blocks': availability_blocks\n }\n \n return render_to_response(template, context,\n context_instance=RequestContext(request))\n \ndef calendar_week(request, app, model, object_id, calendar_slug, year, month, day, model_instance=None, template='calloway/calendar_week.html'):\n \n # If the optional model_instance arg is passed in, this saves two db queries.\n if model_instance is not None:\n ct = ContentType.objects.get_for_model(model_instance)\n owner = model_instance\n # Otherwise lookup the ContentType and the object_id.\n else:\n ct = ContentType.objects.get(app_label=app, model=model)\n owner = ct.get_object_for_this_type(pk=object_id)\n \n calendar = get_object_or_404(Calendar, slug=calendar_slug, content_type=ct, object_id=owner.pk)\n\n # Do some date work.\n date = datetime.datetime(int(year), int(MONTHS_3_REV[month]), int(day))\n # Should be 'week starting', therefore day must be a Monday (settings.CALENDAR_WEEK_START_DAY?).\n if date.weekday() != 0:\n date = date + datetime.timedelta(days=-date.weekday())\n next_week = date + datetime.timedelta(days=+7)\n previous_week = date + datetime.timedelta(days=-7)\n \n events = calendar.get_events_for_week(date)\n add_css_positioning_to_events(events)\n context = {\n 'calendar': calendar,\n 'year': date.year,\n 'month': date.month,\n 'day': date.day,\n 'date': date,\n 'next_week': next_week,\n 'next_week_url': '/calendars/%s/%s/%s/%s/week/%s/%s/%s/' % (app, model, object_id, calendar_slug, next_week.year, force_unicode(MONTHS_3[next_week.month]), next_week.day),\n 'previous_week': previous_week,\n 'previous_week_url': '/calendars/%s/%s/%s/%s/week/%s/%s/%s/' % (app, model, object_id, calendar.slug, previous_week.year, force_unicode(MONTHS_3[previous_week.month]), previous_week.day),\n 'events': events\n }\n return render_to_response(template, context,\n context_instance=RequestContext(request))\n \ndef calendar_day(request, owner_username, calendar_slug, year, month, day, template='calloway/calendar_month.html'):\n try:\n date = datetime.date(*time.strptime(year+month+day, '%Y%b%d')[:3])\n except ValueError:\n raise Http404\n\n return render_to_response(template, { 'year':year, 'month': date.month, 'day': day, 'date': date },\n context_instance=RequestContext(request))\n \ndef list_calendars(request, app, model, template='calloway/list_calendars.html'):\n ct = ContentType.objects.filter(app_label=app, model=model)\n calendars = 
Calendar.objects.filter(content_type=ct)\n context = {\n 'calendars': calendars\n }\n \n return render_to_response(template, context,\n context_instance=RequestContext(request))\n\n\ndef list_users_in_group_calendars(request, group, template='calloway/list_users_in_group_calendars.html'):\n # Getting profile direct so it's not a separate query for each one.\n # TODO: Can't do this, it references ellis_manager!!!\n user_profiles = UserProfile.objects.filter(user__groups__name=group)\n context = {\n 'users': user_profiles\n }\n\n return render_to_response(template, context,\n context_instance=RequestContext(request))", "sub_path": "views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 10890, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 88, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 90, "usage_type": "call"}, {"api_name": "calloway.models.Event.objects.select_related", "line_number": 100, "usage_type": "call"}, {"api_name": "calloway.models.Event.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "calloway.models.Event", "line_number": 100, "usage_type": "name"}, {"api_name": "calloway.models.Event.DoesNotExist", "line_number": 101, "usage_type": "attribute"}, {"api_name": "calloway.models.Event", "line_number": 101, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 102, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 105, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 107, "usage_type": "call"}, {"api_name": "django.core.serializers.serialize", "line_number": 115, "usage_type": "call"}, {"api_name": "django.core.serializers", "line_number": 115, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 116, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 118, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 119, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 97, "usage_type": "name"}, {"api_name": "calloway.forms.EventForm", "line_number": 125, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 128, "usage_type": "call"}, {"api_name": "calloway.models.Calendar", "line_number": 128, "usage_type": "argument"}, {"api_name": "calloway.forms.EventForm", "line_number": 140, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 145, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 146, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 121, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 150, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 151, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 156, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 156, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 157, "usage_type": "call"}, {"api_name": 
"calloway.models.Calendar", "line_number": 157, "usage_type": "argument"}, {"api_name": "django.utils.dates.MONTHS_3_REV", "line_number": 160, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 162, "usage_type": "call"}, {"api_name": "django.utils.encoding.force_unicode", "line_number": 178, "usage_type": "call"}, {"api_name": "django.utils.dates.MONTHS_3", "line_number": 178, "usage_type": "name"}, {"api_name": "django.utils.encoding.force_unicode", "line_number": 180, "usage_type": "call"}, {"api_name": "django.utils.dates.MONTHS_3", "line_number": 180, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 185, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 186, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 153, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model", "line_number": 192, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 192, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 192, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get", "line_number": 196, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 196, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 196, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 199, "usage_type": "call"}, {"api_name": "calloway.models.Calendar", "line_number": 199, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 202, "usage_type": "call"}, {"api_name": "django.utils.dates.MONTHS_3_REV", "line_number": 202, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 205, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 206, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 207, "usage_type": "call"}, {"api_name": "django.utils.encoding.force_unicode", "line_number": 218, "usage_type": "call"}, {"api_name": "django.utils.dates.MONTHS_3", "line_number": 218, "usage_type": "name"}, {"api_name": "django.utils.encoding.force_unicode", "line_number": 220, "usage_type": "call"}, {"api_name": "django.utils.dates.MONTHS_3", "line_number": 220, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 223, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 224, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 228, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 228, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 230, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 232, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 233, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.filter", "line_number": 236, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 236, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 236, "usage_type": "name"}, {"api_name": "calloway.models.Calendar.objects.filter", "line_number": 
237, "usage_type": "call"}, {"api_name": "calloway.models.Calendar.objects", "line_number": 237, "usage_type": "attribute"}, {"api_name": "calloway.models.Calendar", "line_number": 237, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 242, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 243, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 254, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 255, "usage_type": "call"}]} +{"seq_id": "568673962", "text": "# Finish 2D convolution/filtering by your self.\n# What you are supposed to do can be described as \"median blur\", which means by using a sliding window\n# on an image, your task is not going to do a normal convolution, but to find the median value within\n# that crop.\n#\n# You can assume your input has only one channel. (a.k.a a normal 2D list/vector)\n# And you do need to consider the padding method and size. There are 2 padding ways: REPLICA & ZERO. When\n# \"REPLICA\" are given to you, the padded pixels are the same with the border pixels. E.g is [1 2 3] is your\n# image, the padded version will be [(...1 1) 1 2 3 (3 3...)] where how many 1 & 3 in the parenthesis\n# depends on your padding size. When \"ZERO\", the padded version will be [(...0 0) 1 2 3 (0 0...)]\n#\n# Assume your input's size of the image is W x H, kernel size's m x n. You may first complete a version\n# with O(W·H·m·n log(m·n)) to O(W·H·m·n·m·n)).\n# Follow up 1: Can it be completed in a shorter time complexity?\n# Follow up 2: Can it be completed in O(W·H·m·n)?\n#\n# Python version:\n# def medianBlur(img, kernel, padding_way):\n# img & kernel is List of List; padding_way a string\n# Please finish your code under this blank\n#\nimport numpy as np\nimport cv2\n\ndef medianBlur(img, kernel, padding_way,padding_size=2):\n img_padded = []\n h,w = img.shape\n kh,kw = kernel.shape\n if padding_way == 'REPLICA':\n img_padded=np.pad(img,padding_size,'edge')\n elif padding_way == 'ZERO':\n img_padded = np.pad(img,padding_size,'constant')\n\n out_h,out_w = (h+2*padding_size+1-kh),(w+2*padding_size+1-kw)\n img_pooling=np.zeros((out_h,out_w))\n for i in range(out_h):\n for j in range(out_w):\n img_pooling[i][j]=np.median(img_padded[i:i+kh,j:j+kw])\n\n return img_pooling\n\nif __name__=='__main__':\n img_gray=cv2.imread('lena.jpg',0)\n\n kernel = cv2.getGaussianKernel(5,1);\n kernel2D = kernel*kernel.T\n\n print(img_gray.shape)\n print(kernel2D.shape)\n img_padding_REPLICA = medianBlur(img_gray, kernel2D, 'REPLICA')\n img_padding_ZERO = medianBlur(img_gray, kernel2D, 'ZERO')\n img_padding_ZERO = img_padding_ZERO.astype(np.uint8)\n img_padding_REPLICA = img_padding_REPLICA.astype(np.uint8)\n\n cv2.imshow('img_padding_REPLICA',img_padding_REPLICA)\n cv2.imshow('img_padding_ZERO',img_padding_ZERO)\n key=cv2.waitKey()\n if key==27:\n cv2.destroyAllWindows()\n\n\n\n\n", "sub_path": "week2/medianblur.py", "file_name": "medianblur.py", "file_ext": "py", "file_size_in_byte": 2421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.pad", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 43, "usage_type": "call"}, 
{"api_name": "cv2.getGaussianKernel", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "240862021", "text": "\"\"\"\nThis module contains the Sanic routes, as well as functions necessary to retrieve video files. This module also\ncontains functions that will search the file structure for video files, as well as cleanup the DB video records.\n\nAll paths in the DB are relative. A Channel's directory is relative to the video_root_directory. A Video's path (as\nwell as its meta files) is relative to its Channel's directory.\n\n Example:\n Real Paths:\n video_root_directory = '/media/something'\n channel['directory'] = '/media/something/the channel'\n video['video_path'] = '/media/something/the channel/foo.mp4'\n video['poster_path'] = '/media/something/the channel/foo.jpg'\n video['video_path'] = '/media/something/the channel/subdir/bar.mp4'\n\n The same paths in the DB:\n channel['directory'] = 'the channel'\n video['video_path'] = 'foo.mp4'\n video['poster_path'] = 'foo.jpg'\n video['video_path'] = 'subdir/bar.mp4'\n\nRelative DB paths allow files to be moved without having to rebuild the entire collection. It also ensures that when\na file is moved, it will not be duplicated in the DB.\n\"\"\"\nimport asyncio\nimport pathlib\nfrom functools import wraps\nfrom http import HTTPStatus\nfrom uuid import uuid1\n\nfrom dictorm import DictDB\nfrom sanic import Blueprint, response\nfrom sanic.exceptions import abort\nfrom sanic.request import Request\n\nfrom lib.common import sanitize_link, boolean_arg, load_schema, env, attach_websocket_with_queue, get_sanic_url\nfrom lib.db import get_db_context\nfrom lib.plugins.videos.captions import process_captions\nfrom lib.plugins.videos.common import get_conflicting_channels, get_absolute_video_path, UnknownFile\nfrom lib.plugins.videos.downloader import insert_video, update_channels, download_all_missing_videos\nfrom lib.plugins.videos.main import logger\nfrom lib.plugins.videos.schema import downloader_config_schema, channel_schema\nfrom .common import generate_video_paths, save_settings_config, get_downloader_config, \\\n get_absolute_channel_directory, UnknownDirectory\n\nPLUGIN_ROOT = 'videos'\n\n\ndef set_plugins(plugins):\n global PLUGIN_ROOT\n PLUGIN_ROOT = plugins\n\n\napi_bp = Blueprint('api_video', url_prefix='/videos')\n\n\n@api_bp.put('/settings')\n@load_schema(downloader_config_schema)\ndef settings(request: Request, data: dict):\n downloader_config = get_downloader_config()\n downloader_config['video_root_directory'] = data['video_root_directory']\n downloader_config['file_name_format'] = data['file_name_format']\n save_settings_config(downloader_config)\n return response.json({'success': 'Settings saved'})\n\n\n@api_bp.get('/channels')\ndef get_channels(request: Request):\n db: DictDB = request.ctx.get_db()\n Channel = db['channel']\n channels = Channel.get_where().order_by('name DESC')\n channels = list(channels)\n return response.json({'channels': channels})\n\n\nrefresh_queue, refresh_event = attach_websocket_with_queue('/feeds/refresh', 1000, api_bp)\n\n\n@api_bp.post('/settings/refresh')\nasync def 
refresh(_):\n    \"\"\"\n    Search for videos that have previously been downloaded and stored.\n    \"\"\"\n    refresh_logger = logger.getChild('refresh')\n\n    # Only one refresh can run at a time\n    if refresh_event.is_set():\n        return response.json({'error': 'Refresh already running'}, HTTPStatus.BAD_REQUEST)\n\n    refresh_event.set()\n    refresh_queue.put('refresh-started')\n\n    async def do_refresh():\n        refresh_logger.info('refresh started')\n\n        with get_db_context(commit=True) as (db_conn, db):\n            for msg in _refresh_videos(db):\n                refresh_queue.put(msg)\n\n        refresh_queue.put('refresh-complete')\n        refresh_logger.info('refresh complete')\n\n        refresh_event.clear()\n\n    coro = do_refresh()\n    asyncio.ensure_future(coro)\n    refresh_logger.debug('do_refresh scheduled')\n    stream_url = get_sanic_url(scheme='ws', path='/api/videos/feeds/refresh')\n    return response.json({'success': 'stream-started', 'stream-url': stream_url})\n\n\ndownload_queue, download_event = attach_websocket_with_queue('/feeds/download', 1000, api_bp)\n\n\n@api_bp.post('/settings/download')\nasync def download(_):\n    \"\"\"\n    Compare previously downloaded videos with newly updated catalogs. If any videos are missing, download them.\n    :return:\n    \"\"\"\n    download_logger = logger.getChild('download')\n\n    # Only one download can run at a time\n    if download_event.is_set():\n        return response.json({'error': 'download already running'}, HTTPStatus.BAD_REQUEST)\n\n    download_event.set()\n    download_queue.put('download-started')\n\n    async def do_download():\n        download_logger.info('download started')\n\n        with get_db_context(commit=True) as (db_conn, db):\n            for msg in update_channels(db_conn, db):\n                download_queue.put(msg)\n            for msg in download_all_missing_videos(db_conn, db):\n                download_queue.put(msg)\n\n        download_queue.put('download-complete')\n        download_logger.info('download complete')\n\n        download_event.clear()\n\n    coro = do_download()\n    asyncio.ensure_future(coro)\n    download_logger.debug('do_download scheduled')\n    stream_url = get_sanic_url(scheme='ws', path='/api/videos/feeds/download')\n    return response.json({'success': 'stream-started', 'stream-url': stream_url})\n\n\n@api_bp.get('/channel/<link:string>')\ndef channel_get(request: Request, link: str):\n    db: DictDB = request.ctx.get_db()\n    Channel = db['channel']\n    channel = Channel.get_one(link=link)\n    if not channel:\n        return response.json({'error': 'Unknown channel'}, HTTPStatus.NOT_FOUND)\n    return response.json({'channel': channel})\n\n\n@api_bp.post('/channel')\n@load_schema(channel_schema)\ndef channel_post(request: Request, data: dict):\n    \"\"\"Create a new channel\"\"\"\n    try:\n        data['directory'] = get_absolute_channel_directory(data['directory'])\n    except UnknownDirectory:\n        return response.json({'error': 'Unknown directory'}, HTTPStatus.BAD_REQUEST)\n\n    db: DictDB = request.ctx.get_db()\n    Channel = db['channel']\n\n    # Verify that the URL/Name/Link aren't taken\n    conflicting_channels = get_conflicting_channels(\n        db,\n        url=data['url'],\n        name_=data['name'],\n        link=sanitize_link(data['name']),\n    )\n    if conflicting_channels:\n        return response.json({'error': 'Channel Name or URL already taken'}, HTTPStatus.BAD_REQUEST)\n\n    with db.transaction(commit=True):\n        channel = Channel(\n            name=data['name'],\n            url=data['url'],\n            match=data['match_regex'],\n            link=sanitize_link(data['name']),\n        )\n        channel.flush()\n\n    return response.json({'success': 'Channel created successfully'}, HTTPStatus.CREATED,\n                         {'Location': 
f'/api/videos/channel/{channel[\"link\"]}'})\n\n\n@api_bp.put('/channel/<link:string>')\n@load_schema(channel_schema)\ndef channel_put(request: Request, link: str, data: dict):\n    \"\"\"Update an existing channel\"\"\"\n    db: DictDB = request.ctx.get_db()\n    Channel = db['channel']\n\n    with db.transaction(commit=True):\n        existing_channel = Channel.get_one(link=link)\n\n        if not existing_channel:\n            return response.json({'error': 'Unknown channel'}, 404)\n\n        # Only update directory if it was empty\n        if data['directory'] and not existing_channel['directory']:\n            try:\n                data['directory'] = get_absolute_channel_directory(data['directory'])\n            except UnknownDirectory:\n                return response.json({'error': 'Unknown directory'}, 404)\n        else:\n            data['directory'] = existing_channel['directory']\n        data['directory'] = str(data['directory'])\n\n        # Verify that the URL/Name/Link aren't taken\n        conflicting_channels = get_conflicting_channels(\n            db=db,\n            id=existing_channel['id'],\n            url=data['url'],\n            name_=data['name'],\n            link=data['link'],\n            directory=data['directory'],\n        )\n        if list(conflicting_channels):\n            return response.json({'error': 'Channel Name or URL already taken'}, 400)\n\n        existing_channel['url'] = data['url']\n        existing_channel['name'] = data['name']\n        existing_channel['directory'] = data['directory']\n        existing_channel['match_regex'] = data['match_regex']\n        existing_channel.flush()\n\n    return response.json({'success': 'The channel was updated successfully.'})\n\n\n@api_bp.delete('/channel/<link:string>')\ndef channel_delete(request, link: str):\n    db: DictDB = request.ctx.get_db()\n    Channel = db['channel']\n    channel = Channel.get_one(link=link)\n    if not channel:\n        return response.json({'error': 'Unknown channel'}, HTTPStatus.NOT_FOUND)\n    with db.transaction(commit=True):\n        channel.delete()\n    return response.json({'success': 'Channel deleted'})\n\n\n@api_bp.get('/channel/<link:string>/videos')\ndef channel_videos(request, link: str):\n    db: DictDB = request.ctx.get_db()\n    Channel = db['channel']\n    channel = Channel.get_one(link=link)\n    if not channel:\n        return response.json({'error': 'Unknown channel'}, HTTPStatus.NOT_FOUND)\n    return response.json({'videos': list(channel['videos'])})\n\n\n@api_bp.route('/video/<hash:string>')\n@api_bp.route('/poster/<hash:string>')\n@api_bp.route('/caption/<hash:string>')\nasync def media_file(request: Request, hash: str):\n    db: DictDB = request.ctx.get_db()\n    download = boolean_arg(request, 'download')\n    Video = db['video']\n    kind = str(request.path).split('/')[3]\n\n    try:\n        video = Video.get_one(video_path_hash=hash)\n        path = get_absolute_video_path(video, kind=kind)\n        if download:\n            return await response.file_stream(str(path), filename=path.name)\n        else:\n            return await response.file_stream(str(path))\n    except (TypeError, KeyError, UnknownFile):\n        abort(404, f\"Can't find {kind} by that ID.\")\n\n\ndef get_channel_form(form_data: dict):\n    channel = dict(\n        url=form_data.get('url'),\n        name=form_data['name'],\n        match_regex=form_data.get('match_regex'),\n        link=sanitize_link(form_data['name']),\n        directory=form_data.get('directory'),\n    )\n    return channel\n\n\ndef refresh_channel_videos(db, channel):\n    \"\"\"\n    Find all video files in a channel's directory. 
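Matching against existing records is done on paths relative to the channel directory, per the module docstring.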
Add any videos not in the DB to the DB.\n \"\"\"\n # Set the idempotency key so we can remove any videos not touched during this search\n curs = db.get_cursor()\n curs.execute('UPDATE video SET idempotency=NULL WHERE channel_id=%s', (channel['id'],))\n idempotency = str(uuid1())\n\n directory = get_absolute_channel_directory(channel['directory'])\n\n # A set of absolute paths that exist in the file system\n possible_new_paths = set(generate_video_paths(directory))\n\n # Update all videos that match the current video paths\n query = 'UPDATE video SET idempotency = %s WHERE channel_id = %s AND video_path = ANY(%s) RETURNING video_path'\n relative_new_paths = [str(i.relative_to(directory)) for i in possible_new_paths]\n curs.execute(query, (idempotency, channel['id'], relative_new_paths))\n existing_paths = {i for (i,) in curs.fetchall()}\n\n # Get the paths for any video not yet in the DB\n # (paths in DB are relative, but we need to pass an absolute path)\n new_videos = {p for p in possible_new_paths if str(p.relative_to(directory)) not in existing_paths}\n\n for video_path in new_videos:\n logger.debug(f'{channel[\"name\"]}: Added {video_path}')\n insert_video(db, pathlib.Path(video_path), channel, idempotency=idempotency)\n\n curs.execute('DELETE FROM video WHERE channel_id=%s AND idempotency IS NULL RETURNING id', (channel['id'],))\n deleted_count = curs.fetchall()\n if deleted_count:\n deleted_count = len(deleted_count)\n deleted_status = f'Deleted {deleted_count} video records from channel {channel[\"name\"]}'\n logger.info(deleted_status)\n yield deleted_status\n\n status = f'{channel[\"name\"]}: {len(new_videos)} new videos, {len(existing_paths)} already existed. '\n logger.info(status)\n yield status\n\n # Fill in any missing captions\n query = 'SELECT id FROM video WHERE channel_id=%s AND caption IS NULL AND caption_path IS NOT NULL'\n curs.execute(query, (channel['id'],))\n missing_captions = [i for (i,) in curs.fetchall()]\n Video = db['video']\n for video_id in missing_captions:\n video = Video.get_one(id=video_id)\n process_captions(video)\n yield f'Processed captions for video {video_id}'\n\n status = f'Processed {len(missing_captions)} missing captions.'\n logger.info(status)\n yield status\n\n\ndef _refresh_videos(db: DictDB):\n \"\"\"\n Find any videos in the channel directories and add them to the DB. 
Delete DB records of any videos not in the\n file system.\n\n Yields status updates to be passed to the UI.\n\n :param db:\n :return:\n \"\"\"\n logger.info('Refreshing video files')\n Channel = db['channel']\n\n total_channels = Channel.count()\n\n for idx, channel in enumerate(Channel.get_where()):\n progress = int((idx / total_channels) * 100)\n yield {'progress': progress, 'message': f'Checking {channel[\"name\"]} directory for new videos'}\n with db.transaction(commit=True):\n for msg in refresh_channel_videos(db, channel):\n yield {'message': msg}\n yield {'progress': 100, 'message': 'All videos refreshed.'}\n\n\n@wraps(_refresh_videos)\ndef refresh_videos(db: DictDB):\n return list(_refresh_videos(db))\n\n\n@wraps(_refresh_videos)\ndef refresh_videos_with_db():\n with get_db_context(commit=True) as (db_conn, db):\n return refresh_videos(db)\n\n\ndef video_search(db: DictDB, search_str, offset, link):\n db_conn = db.conn\n template = env.get_template('lib/plugins/videos/templates/search_video.html')\n curs = db_conn.cursor()\n\n # Get the match count per channel\n query = 'SELECT channel_id, COUNT(*) FROM video WHERE textsearch @@ to_tsquery(%s) GROUP BY channel_id'\n curs.execute(query, (search_str,))\n channel_totals = {i: j for (i, j) in curs.fetchall()}\n\n # Get the names of each channel, add the counts respectively\n query = 'SELECT id, name, link FROM channel ORDER BY LOWER(name)'\n curs.execute(query)\n channels = []\n for (id_, name, link_) in curs.fetchall():\n channel_total = channel_totals[id_] if id_ in channel_totals else 0\n d = {\n 'id': id_,\n 'name': f'{name} ({channel_total})',\n 'link': link_,\n 'search_link': f'/{PLUGIN_ROOT}/search?link={link_}&search={search_str}',\n }\n channels.append(d)\n\n # Get the search results\n if link:\n # The results are restricted to a single channel\n curs.execute('SELECT id FROM channel WHERE link = %s', (link,))\n (channel_id,) = curs.fetchone()\n query = 'SELECT id, ts_rank_cd(textsearch, to_tsquery(%s)) FROM video WHERE ' \\\n 'textsearch @@ to_tsquery(%s) AND channel_id=%s ORDER BY 2 OFFSET %s LIMIT 20'\n curs.execute(query, (search_str, search_str, channel_id, offset))\n total = channel_totals[channel_id]\n else:\n # The results are for all channels\n query = 'SELECT id, ts_rank_cd(textsearch, to_tsquery(%s)) FROM video WHERE ' \\\n 'textsearch @@ to_tsquery(%s) ORDER BY 2 OFFSET %s LIMIT 20'\n curs.execute(query, (search_str, search_str, offset))\n # Sum up all the matches for paging\n total = sum(channel_totals.values())\n\n results = list(curs.fetchall())\n\n videos = []\n Video = db['video']\n if results:\n videos = [dict(i) for i in Video.get_where(Video['id'].In([i[0] for i in results]))]\n\n results = {\n 'template': template,\n 'videos': videos,\n 'total': total,\n 'channels': channels,\n }\n return results\n", "sub_path": "lib/plugins/videos/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 15983, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sanic.Blueprint", "line_number": 54, "usage_type": "call"}, {"api_name": "sanic.request.Request", "line_number": 59, "usage_type": "name"}, {"api_name": "common.get_downloader_config", "line_number": 60, "usage_type": "call"}, {"api_name": "common.save_settings_config", "line_number": 63, "usage_type": "call"}, {"api_name": "sanic.response.json", "line_number": 64, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 64, "usage_type": "name"}, {"api_name": 
"lib.common.load_schema", "line_number": 58, "usage_type": "call"}, {"api_name": "lib.plugins.videos.schema.downloader_config_schema", "line_number": 58, "usage_type": "argument"}, {"api_name": "sanic.request.Request", "line_number": 68, "usage_type": "name"}, {"api_name": "dictorm.DictDB", "line_number": 69, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 73, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 73, "usage_type": "name"}, {"api_name": "lib.common.attach_websocket_with_queue", "line_number": 76, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger.getChild", "line_number": 84, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger", "line_number": 84, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 88, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 88, "usage_type": "name"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 88, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 88, "usage_type": "name"}, {"api_name": "lib.db.get_db_context", "line_number": 96, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 106, "usage_type": "call"}, {"api_name": "lib.common.get_sanic_url", "line_number": 108, "usage_type": "call"}, {"api_name": "sanic.response.json", "line_number": 109, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 109, "usage_type": "name"}, {"api_name": "lib.common.attach_websocket_with_queue", "line_number": 112, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger.getChild", "line_number": 121, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger", "line_number": 121, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 125, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 125, "usage_type": "name"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 125, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 125, "usage_type": "name"}, {"api_name": "lib.db.get_db_context", "line_number": 133, "usage_type": "call"}, {"api_name": "lib.plugins.videos.downloader.update_channels", "line_number": 134, "usage_type": "call"}, {"api_name": "lib.plugins.videos.downloader.download_all_missing_videos", "line_number": 136, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 145, "usage_type": "call"}, {"api_name": "lib.common.get_sanic_url", "line_number": 147, "usage_type": "call"}, {"api_name": "sanic.response.json", "line_number": 148, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 148, "usage_type": "name"}, {"api_name": "sanic.request.Request", "line_number": 152, "usage_type": "name"}, {"api_name": "dictorm.DictDB", "line_number": 153, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 157, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 157, "usage_type": "name"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 157, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 157, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 158, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 158, "usage_type": "name"}, {"api_name": "sanic.request.Request", "line_number": 163, "usage_type": "name"}, {"api_name": "common.get_absolute_channel_directory", "line_number": 166, "usage_type": "call"}, {"api_name": "common.UnknownDirectory", 
"line_number": 167, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 168, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 168, "usage_type": "name"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 168, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 168, "usage_type": "name"}, {"api_name": "dictorm.DictDB", "line_number": 170, "usage_type": "name"}, {"api_name": "lib.plugins.videos.common.get_conflicting_channels", "line_number": 174, "usage_type": "call"}, {"api_name": "lib.common.sanitize_link", "line_number": 178, "usage_type": "call"}, {"api_name": "sanic.response.json", "line_number": 181, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 181, "usage_type": "name"}, {"api_name": "http.HTTPStatus.BAD_REQUEST", "line_number": 181, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 181, "usage_type": "name"}, {"api_name": "lib.common.sanitize_link", "line_number": 188, "usage_type": "call"}, {"api_name": "sanic.response.json", "line_number": 192, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 192, "usage_type": "name"}, {"api_name": "http.HTTPStatus.CREATED", "line_number": 192, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 192, "usage_type": "name"}, {"api_name": "lib.common.load_schema", "line_number": 162, "usage_type": "call"}, {"api_name": "lib.plugins.videos.schema.channel_schema", "line_number": 162, "usage_type": "argument"}, {"api_name": "sanic.request.Request", "line_number": 198, "usage_type": "name"}, {"api_name": "dictorm.DictDB", "line_number": 200, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 207, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 207, "usage_type": "name"}, {"api_name": "common.get_absolute_channel_directory", "line_number": 212, "usage_type": "call"}, {"api_name": "common.UnknownDirectory", "line_number": 213, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 214, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 214, "usage_type": "name"}, {"api_name": "lib.plugins.videos.common.get_conflicting_channels", "line_number": 220, "usage_type": "call"}, {"api_name": "sanic.response.json", "line_number": 229, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 229, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 237, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 237, "usage_type": "name"}, {"api_name": "lib.common.load_schema", "line_number": 197, "usage_type": "call"}, {"api_name": "lib.plugins.videos.schema.channel_schema", "line_number": 197, "usage_type": "argument"}, {"api_name": "dictorm.DictDB", "line_number": 242, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 246, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 246, "usage_type": "name"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 246, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 246, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 249, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 249, "usage_type": "name"}, {"api_name": "dictorm.DictDB", "line_number": 254, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 258, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 258, "usage_type": "name"}, 
{"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 258, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 258, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 259, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 259, "usage_type": "name"}, {"api_name": "sanic.request.Request", "line_number": 265, "usage_type": "name"}, {"api_name": "dictorm.DictDB", "line_number": 266, "usage_type": "name"}, {"api_name": "lib.common.boolean_arg", "line_number": 267, "usage_type": "call"}, {"api_name": "lib.plugins.videos.common.get_absolute_video_path", "line_number": 273, "usage_type": "call"}, {"api_name": "sanic.response.file_stream", "line_number": 275, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 275, "usage_type": "name"}, {"api_name": "sanic.response.file_stream", "line_number": 277, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 277, "usage_type": "name"}, {"api_name": "lib.plugins.videos.common.UnknownFile", "line_number": 278, "usage_type": "name"}, {"api_name": "sanic.exceptions.abort", "line_number": 279, "usage_type": "call"}, {"api_name": "lib.common.sanitize_link", "line_number": 287, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 300, "usage_type": "call"}, {"api_name": "common.get_absolute_channel_directory", "line_number": 302, "usage_type": "call"}, {"api_name": "common.generate_video_paths", "line_number": 305, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger.debug", "line_number": 318, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger", "line_number": 318, "usage_type": "name"}, {"api_name": "lib.plugins.videos.downloader.insert_video", "line_number": 319, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 319, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger.info", "line_number": 326, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger", "line_number": 326, "usage_type": "name"}, {"api_name": "lib.plugins.videos.main.logger.info", "line_number": 330, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger", "line_number": 330, "usage_type": "name"}, {"api_name": "lib.plugins.videos.captions.process_captions", "line_number": 340, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger.info", "line_number": 344, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger", "line_number": 344, "usage_type": "name"}, {"api_name": "dictorm.DictDB", "line_number": 348, "usage_type": "name"}, {"api_name": "lib.plugins.videos.main.logger.info", "line_number": 358, "usage_type": "call"}, {"api_name": "lib.plugins.videos.main.logger", "line_number": 358, "usage_type": "name"}, {"api_name": "dictorm.DictDB", "line_number": 373, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 372, "usage_type": "call"}, {"api_name": "lib.db.get_db_context", "line_number": 379, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 377, "usage_type": "call"}, {"api_name": "dictorm.DictDB", "line_number": 383, "usage_type": "name"}, {"api_name": "lib.common.env.get_template", "line_number": 385, "usage_type": "call"}, {"api_name": "lib.common.env", "line_number": 385, "usage_type": "name"}]} +{"seq_id": "473596836", "text": "\"\"\"classic Acrobot task\"\"\"\nfrom rlpy.Tools import wrap, bound, lines, fromAtoB, rk4\nfrom rlpy.Domains import Acrobot\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n__copyright__ = 
\"Copyright 2013, RLPy http://acl.mit.edu/RLPy\"\n__credits__ = [\"Alborz Geramifard\", \"Robert H. Klein\", \"Christoph Dann\",\n \"William Dabney\", \"Jonathan P. How\"]\n__license__ = \"BSD 3-Clause\"\n__author__ = \"Christoph Dann \"\n\nclass ModifiedAcrobot(Acrobot):\n episodeCap = 500\n\n def __init__(self, **kwargs):\n self.counter = 0\n super(ModifiedAcrobot, self).__init__()\n\n def step(self, a):\n s = self.state\n torque = self.AVAIL_TORQUE[a]\n\n # Add noise to the force action\n if self.torque_noise_max > 0:\n torque += self.random_state.uniform(-\n self.torque_noise_max, self.torque_noise_max)\n\n # Now, augment the state with our force action so it can be passed to\n # _dsdt\n s_augmented = np.append(s, torque)\n\n ns = rk4(self._dsdt, s_augmented, [0, self.dt])\n # only care about final timestep of integration returned by integrator\n ns = ns[-1]\n ns = ns[:4] # omit action\n # ODEINT IS TOO SLOW!\n # ns_continuous = integrate.odeint(self._dsdt, self.s_continuous, [0, self.dt])\n # self.s_continuous = ns_continuous[-1] # We only care about the state\n # at the ''final timestep'', self.dt\n\n ns[0] = wrap(ns[0], -np.pi, np.pi)\n ns[1] = wrap(ns[1], -np.pi, np.pi)\n ns[2] = bound(ns[2], -self.MAX_VEL_1, self.MAX_VEL_1)\n ns[3] = bound(ns[3], -self.MAX_VEL_2, self.MAX_VEL_2)\n self.state = ns.copy()\n terminal = self.isTerminal()\n reward = self._reward_function(terminal)\n return reward, ns, terminal, self.possibleActions()\n\n def _reward_function(self, terminal):\n return -1. if not terminal else 0.\n\n def showDomain(self, a=0):\n self.counter += 1\n if self.counter % 2:\n return\n super(ModifiedAcrobot, self).showDomain(a)\n plt.pause(0.001)\n\n\nclass Acrobot_Mass1(ModifiedAcrobot):\n LINK_LENGTH_1 = 1. # [m]\n LINK_LENGTH_2 = 1. # [m]\n LINK_MASS_1 = 3. #: [kg] mass of link 1\n LINK_MASS_2 = 1.\n\nclass Acrobot_Mass2(ModifiedAcrobot):\n LINK_LENGTH_1 = 1. # [m]\n LINK_LENGTH_2 = 1. # [m]\n LINK_MASS_1 = 1. 
#: [kg] mass of link 1\n LINK_MASS_2 = 0.1", "sub_path": "rlgym/models/Acrobot/Mass1_Heavy/domain.py", "file_name": "domain.py", "file_ext": "py", "file_size_in_byte": 2461, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "rlpy.Domains.Acrobot", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 31, "usage_type": "call"}, {"api_name": "rlpy.Tools.rk4", "line_number": 33, "usage_type": "call"}, {"api_name": "rlpy.Tools.wrap", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 42, "usage_type": "attribute"}, {"api_name": "rlpy.Tools.wrap", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rlpy.Tools.bound", "line_number": 44, "usage_type": "call"}, {"api_name": "rlpy.Tools.bound", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "339804760", "text": "import numpy\nfrom chaco.api import ToolbarPlot, ArrayPlotData\nfrom chaco.tools.api import LineInspector\nfrom enable.component_editor import ComponentEditor\nfrom traits.api import HasTraits, Instance\nfrom traitsui.api import Item, View\n\nclass MyPlot(HasTraits):\n \"\"\" Plot where depth is the index such that the plot is vertical\n and the origin is the upper left\n \"\"\"\n plot = Instance(ToolbarPlot)\n\n traits_view = View(Item('plot', editor=ComponentEditor(),\n width=600, height=600, show_label=False))\n\n def __init__(self, depth, data_series, **kw):\n super(MyPlot, self).__init__(**kw)\n\n plot_data = ArrayPlotData(index=depth)\n plot_data.set_data('data_series', data_series)\n self.plot = ToolbarPlot(plot_data, orientation='v', origin='top left')\n line = self.plot.plot(('index', 'data_series'))[0]\n\n line_inspector = LineInspector(component=line, write_metadata=True)\n line.tools.append(line_inspector)\n line.overlays.append(line_inspector)\n\n\ndepth = numpy.arange(1.0, 100.0, 0.1)\ndata_series = numpy.sin(depth) + depth/10.0\n\nmy_plot = MyPlot(depth, data_series)\nmy_plot.configure_traits()\n", "sub_path": "examples/demo/depth.py", "file_name": "depth.py", "file_ext": "py", "file_size_in_byte": 1193, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "traits.api.HasTraits", "line_number": 8, "usage_type": "name"}, {"api_name": "traits.api.Instance", "line_number": 12, "usage_type": "call"}, {"api_name": "chaco.api.ToolbarPlot", "line_number": 12, "usage_type": "argument"}, {"api_name": "traitsui.api.View", "line_number": 14, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 14, "usage_type": "call"}, {"api_name": "enable.component_editor.ComponentEditor", "line_number": 14, "usage_type": "call"}, {"api_name": "chaco.api.ArrayPlotData", "line_number": 20, "usage_type": "call"}, {"api_name": "chaco.api.ToolbarPlot", "line_number": 22, "usage_type": "call"}, {"api_name": "chaco.tools.api.LineInspector", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "513035049", "text": "import math\nimport datetime as dt\n\nclass Respondent:\n respondent_count = 0\n \n def __init__(self, respdict):\n self.respdict = respdict\n 
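# track the number of instances created\n        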
Respondent.respondent_count += 1\n    \n    \n    # add the number of children at home and not at home\n    def childnum(self):\n        return self.respdict['childathome'] + self.respdict['childnotathome']\n    \n    # select the weeksworked## keys and calculate the average of their values\n    # calculating average weeks worked across the 20 years of the survey\n    def avgweeksworked(self):\n        workdict = {k : v for k, v in self.respdict.items() if k.startswith('weeksworked') and not math.isnan(v)}\n        nweeks = len(workdict)\n        if nweeks > 0:\n            avgww = sum(workdict.values()) / nweeks\n        else:\n            avgww = 0\n        \n        return avgww\n    \n    # method for calculating age as of a given date\n    def ageby(self, bydatestring):\n        bydate = dt.datetime.strptime(bydatestring, '%Y%m%d')\n        birthyear = self.respdict['birthyear']\n        birthmonth = self.respdict['birthmonth']\n        age = bydate.year - birthyear\n        \n        if (bydate.month < birthmonth or (bydate.month == birthmonth and bydate.day < 15)):\n            age = age - 1\n        return age\n    \n    # method to create a flag if the respondent ever enrolled at a 4-year college\n    def baenrollment(self):\n        colenrdict = {k : v for k, v in self.respdict.items() if k.startswith('colenr') and v == '3. 4-year college'}\n        if (len(colenrdict) > 0):\n            return 'Y'\n        else:\n            return 'N'\n    ", "sub_path": "src/datacleaning/Chapter 10/helperfunctions/respondent.py", "file_name": "respondent.py", "file_ext": "py", "file_size_in_byte": 1612, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "math.isnan", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}]}
+{"seq_id": "362964109", "text": "__author__ = \"Paul Schultz, Jobst Heitzig\"\n__date__ = \"Dec 15, 2016\"\n__version__ = \"v2.2\"\n\n# This file is based on the network creation algorithm published in:\n#\n# A Random Growth Model for Power Grids and Other Spatially Embedded Infrastructure Networks\n# Paul Schultz, Jobst Heitzig, and Juergen Kurths\n# Eur. Phys. J. Special Topics on \"Resilient power grids and extreme events\" (2014)\n# DOI: 10.1140/epjst/e2014-02279-6\n#\n\n\n# TODOs:\n# reparameterize ns\n\n\nimport numpy as np\nimport scipy.spatial.distance as sp\nfrom igraph import Graph\nfrom rtree.index import Index as rtree # install via conda install --channel https://conda.anaconda.org/IOOS rtree\n\n\nclass RpgAlgorithm(object):\n\n\n    def __init__(self, L):\n\n        # parameters for the algorithm\n        self.L = L\n\n        self.w = [.945,.05,.005] # JH: list of relative frequencies of nodes by level n_\\phi^l = w^l * n_\\phi\n        self.n = [100,100,99800] #[2000,2000,60] this is basically n_\\phi summed over all levels\n        self.n0 = [100,100,100] #[1250,250,50]\n        self.p = [0,.1,.3]\n        self.q = [0,.075,.075]\n        self.s = [.2,.05,.0]\n        self.r = [0., 0.7, 1.4]\n        self.u = [0., 2., 2.]\n\n        self.sampling = \"clotty\"\n        self.alpha = 0.5\n        self.beta = 0.95\n        self.gamma = 0.5\n\n        # counters\n        self.levnodes = [[] for l in range(self.L)] # list of nodes in level\n        self.cumnodes = [[] for l in range(self.L)] # list of nodes in level or higher\n        self.added_nodes = [0 for l in range(self.L)]\n        self.added_edges = [0 for l in range(self.L)]\n        self.noffset = 0 # total no. 
nodes added so far\n\n # node coordinates\n self.lon = []\n self.lat = []\n self.lev = [] # level of node\n self.density = [] # distance to closest\n\n # CHANGE WITH CAUTION!\n self.distance_measure = \"euclidean\"\n self.debug = False\n\n \n def __str__(self):\n print(\"----------\")\n #print self.graph.num_vertices(), \"nodes and\", self.graph.num_edges(), \"edges\"\n for attr in vars(self):\n if attr in [\"identifier\", \"added_nodes\", \"n\", \"n0\", \"p\", \"q\", \"r\", \"s\", \"L\", \"w\"]:\n print((attr, \":\", str(getattr(self, attr))))\n return \"----------\"\n\n\n ###############################################################################\n # ## PUBLIC FUNCTIONS ## #\n ###############################################################################\n\n\n def set_params(self, **kwargs):\n for key in kwargs:\n if not hasattr(self, key):\n print((\"ERROR: There is no parameter called:\", key))\n print(\"Possible choices: n,n0,p,q,r,s,u\")\n continue\n else:\n if self._validation(key, kwargs[key]):\n setattr(self, key, kwargs[key])\n else:\n print((\"ERROR: invalid parameter value for\", key, kwargs[key]))\n\n\n def prepare(self, data=None):\n \"\"\"this should be called after set_params\"\"\"\n self.totaln = np.sum(self.n)\n\n self.setup_locations(sampling=self.sampling, locations=data)\n\n self.mst_edges = [None for l in range(self.L)]\n self.init_edges = [None for l in range(self.L)]\n\n #TODO: find way to dynamically add nodes without index problems instead of having a Graph of size totaln\n # see also the comment in _update_graphs()\n\n self.levgraph = [Graph(self.totaln) for l in range(self.L)] # one graph per level, storing only that level's edges\n self.cumgraph = [Graph(self.totaln) for l in range(self.L)] # one graph per level, storing that and all higher levels' edges\n\n self.levrtree = [None for l in range(self.L)] # one RTree per level, storing coordinates of level's nodes\n self.cumrtree = [None for l in range(self.L)] # one RTree per level, storing coordinates of this and higher level's nodes\n\n\n def initialise(self, l): # JH: l = level to initialise\n assert self.n[l] >= self.n0[l]\n\n # step I1: draw random locations from density and add nodes\n #######################################################\n self._get_locations(l, self.noffset, self.noffset + self.n0[l], init=True)\n\n # step I2: construct minimum spanning tree\n ##########################################\n edge_mask = self._initial_mst(l) # will store only nodes and edges on this level\n\n if np.sum(self.u) > 0:\n # extend and recompute density list:\n self.density += [0 for i in range(self.n0[l])]\n for node in range(self.noffset):\n ds = self._get_distances([node], list(range(self.noffset)))[0, :]\n ds[node] = 1e100\n self.density[node] = (1. / ds**2).sum()\n\n if self.debug:\n print((\"I2\", edge_mask))\n\n # step I3: add redundant links\n ##############################\n # CAUTION: logic has changed from original version! 
now it's simply the same as step G4, i.e., a little less optimal!\n \n m = min(int(np.floor(self.n0[l] * (1 - self.s[l]) * (self.p[l] + self.q[l]))), self.n0[l] * (self.n0[l] - 1) / 2 - (self.n0[l] - 1))\n\n for dummy in range(m):\n self._G34(l, np.random.choice(self.levnodes[l]))\n\n # assert self.added_edges[l] == (len(self.adjacency[l].keys()) / 2)\n assert self.added_edges[l] == self.levgraph[l].ecount()\n assert self.added_nodes[l] == len(self.levnodes[l])\n\n # label initial edges\n self.init_edges[l] = self.levgraph[l].get_edgelist()\n\n # step I4: add one link from first of level l added to nearest of level l-1:\n ############################################################################\n if l>0:\n node = self.noffset - self.n0[l]\n lat,lon = self.lat[node],self.lon[node]\n target = list(self.levrtree[l-1].nearest((lat,lon,lat,lon),1))[0]\n\n # update graphs:\n d = self._get_distances([node], [target])[0, 0]\n self._update_graphs(l-1, edges=[(target, node)], weights=[d])\n\n if self.debug:\n print((\"I4\", (node, target)))\n \n \n def grow(self, lmax):\n \"\"\"adds total no. of n[lmax] nodes to levels 0 <= l <= lmax\"\"\"\n\n new_nodes = list(range(self.noffset, self.n[lmax] - self.n0[lmax] + self.noffset))\n\n # draw level for node:\n # TODO: think about w, do we really need it? use n_phi^l instead?\n levels = np.random.choice(list(range(lmax + 1)), p=self.w[:lmax + 1] / np.sum(self.w[:lmax + 1]), size=len(new_nodes))\n #levels = np.repeat(range(lmax+1), repeats=self.n[:lmax+1])\n #np.random.shuffle(levels)\n\n\n\n for l, node in zip(levels, new_nodes):\n\n self.lev.append(l)\n\n # register new node\n self._update_graphs(l, nodes=[node])\n\n if self.debug:\n print(\"---------\")\n print((\"adding node\", node, \"to level\", l))\n\n # step G5: split random link at midpoint\n ########################################\n if (np.random.random() < self.s[l]) and self.levgraph[l].ecount() > 0:\n self._G5(l, node)\n\n else:\n # step G2: link to nearest\n ##########################\n self._G2(l, node)\n\n # step G3: add optimal redundant link to node\n #############################################\n if np.random.random() < self.p[l]:\n self._G34(l, node)\n\n # step G4: add another optimal redundant link to random node\n ############################################################\n if np.random.random() < self.q[l]:\n self._G34(l, np.random.choice(self.levnodes[l]))\n\n\n def cleanup(self):\n \"\"\" remove objects from memory\"\"\"\n del self.levrtree\n del self.cumrtree\n for level in range(self.L):\n del self.levgraph[level]\n del self.cumgraph[level]\n\n\n def setup_locations(self, sampling=\"uniform\", locations = None, centre=None, boundaries=None):\n \"\"\"\n setup function that returns locations, either randomly or from data\n :param sampling:\n :param locations:\n :param centre:\n :param boundaries:\n :return:\n \"\"\"\n if locations is not None:\n assert len(locations) == np.sum(self.n[:self.L])\n self.locations = locations\n self.counter = 0\n\n self.sampling = sampling\n self.centre = centre\n self.boundaries = boundaries\n\n # JH: docstring says this returns locations, but it returns nothing??\n \n\n ###############################################################################\n # ## PRIVATE FUNCTIONS ## #\n ###############################################################################\n\n\n def _get_coords(self, sampling=None, centre=None, boundaries=None):\n\n if sampling is not None:\n # override default sampling method\n self.sampling = sampling\n\n if self.sampling == 
\"uniform\":\n return self._uniformunitsquare(centre, boundaries)\n elif self.sampling == \"data\":\n pos0 = np.array(self.locations[self.counter])\n self.counter += 1\n pos1 = self.alpha * pos0 + (1 - self.alpha) * np.random.uniform(low=-1, high=1, size=2)\n pos2 = self.beta * pos0 + (1 - self.beta) * np.random.uniform(low=-.5, high=.5, size=2)\n return tuple(pos1 if np.random.random() < self.gamma else pos2)\n elif self.sampling == \"clotty\":\n l = len(self.lat)\n if l==0: return (0,0)\n i = np.random.choice(list(range(l)))\n pos0 = np.array([self.lat[i], self.lon[i]])\n pos1 = self.alpha * pos0 + (1 - self.alpha) * np.random.uniform(low=-1,high=1,size=2)\n pos2 = self.beta * pos0 + (1 - self.beta) * np.random.uniform(low=-.5, high=.5, size=2)\n return tuple(pos1 if np.random.random() < self.gamma else pos2)\n else:\n print(\"ERROR: Not implemented yet.\")\n exit(1)\n\n\n def _get_distances(self, sources, targets):\n \"\"\"\n return array of distances from nodes \"sources\" to list of nodes \"targets\"\n \"\"\"\n x = np.c_[np.array(self.lon)[sources], np.array(self.lat)[sources]]\n y = np.c_[np.array(self.lon)[targets], np.array(self.lat)[targets]]\n return sp.cdist(x, y, metric=self.distance_measure)\n \n\n def _uniformunitsquare(self, centre=None, boundaries=None):\n \"\"\"\n return point drawn uniformly at random\n\n :param centre: centre distribution around this point\n :param boundaries: array containing [width, height]\n :return: coordinate tuple\n \"\"\"\n\n if centre is None:\n centre = -1.\n if boundaries is None:\n boundaries = -1.\n\n return (.5 - np.random.uniform(size=2)) * np.array(boundaries) + np.array(centre)\n\n\n def _G2(self, l, node):\n # only now get one new location and nearest earlier node:\n target = self._get_locations(l, node, node+1)\n\n if target is not None:\n # update graphs:\n d = self._get_distances([node], [target])[0, 0]\n self._update_graphs(l, edges=[(target, node)], weights=[d])\n\n if self.debug:\n print((\"G2\", (node, target)))\n\n # update density:\n if np.sum(self.u) > 0:\n ds = self._get_distances([node], list(range(node)))[0, :]\n self.density = list(np.array(self.density) + 1. / ds**2) \n self.density.append((1. 
/ ds**2).sum())\n \n\n def _G34(self, l, node):\n targets = list(set(self.cumnodes[l]).difference(self.cumgraph[l].neighbors(node)).difference([node]))\n if len(targets):\n dists = self._get_distances([node], targets)[0, :]\n prices = dists \n if self.r[l]>0: prices /= (dists + self.cumgraph[l].shortest_paths_dijkstra(node, targets)[0])**self.r[l]\n if self.u[l]>0: prices /= np.array(self.density)[targets] ** self.u[l]\n best = np.argmin(prices)\n a, b = self._s((targets[best], node))\n\n # update graphs:\n d = dists[best]\n self._update_graphs(l, edges=[(a, b)], weights=[d])\n\n if self.debug:\n print((\"G3/4\", (a, b)))\n\n\n def _G5(self, l, node):\n # choose link at random:\n elist = self.levgraph[l].get_edgelist()\n a, b = elist[np.random.choice(list(range(len(elist))))]\n\n # NOTE: CHANGED BEHAVIOUR: now split somewhere, not in middle:\n pos = np.random.random() # 0:a, 1:b\n\n # add node at midpoint and calc distances:\n lat = (1 - pos) * self.lat[a] + pos * self.lat[b]\n lon = (1 - pos) * self.lon[a] + pos * self.lon[b]\n self.lat.append(lat)\n self.lon.append(lon)\n\n # update graphs and rtrees:\n\n eid = self.levgraph[l].get_eid(a, b)\n d = self.levgraph[l].es[\"weight\"][eid]\n\n self.levrtree[l].insert(node, (lat, lon, lat, lon))\n for l2 in range(l + 1):\n self.cumrtree[l2].insert(node, (lat, lon, lat, lon))\n\n self._update_graphs(l, edges=[(a, b)], delete_edges=True)\n self._update_graphs(l, edges=[(a, node), (b, node)], weights=[pos * d, (1 - pos) * d])\n\n # update density:\n if np.sum(self.u) > 0:\n ds = self._get_distances([node], list(range(node)))[0, :]\n self.density = list(np.array(self.density) + 1. / ds**2) \n self.density.append((1. / ds**2).sum())\n\n if self.debug:\n print((\"G5\", (int(a), int(b))))\n\n\n def _validation(self, attr, value):\n value = np.array(value)\n if attr == \"n0\" or attr == \"n\":\n if any(value < 1):\n return False\n else:\n return True\n elif attr in [\"r\", \"u\"]:\n if any(value < 0):\n return False\n else:\n return True\n elif attr in [\"p\", \"q\", \"s\", \"w\"]:\n if any(value < 0) or any(value > 1):\n return False\n else:\n return True\n elif attr == \"L\":\n if value < 1:\n return False\n else:\n return True\n elif attr in [\"alpha\", \"beta\", \"gamma\"]:\n if value < 0 or value > 1:\n return False\n else:\n return True\n\n\n def _initial_mst(self, l):\n\n self.lev += [l for i in range(self.n0[l])]\n nodes = list(range(self.noffset, self.noffset+self.n0[l]))\n self.mst_edges[l] = elist = self._get_mst(l)\n self._update_graphs(l, nodes=nodes, edges=elist)\n\n return elist\n\n\n def _get_mst(self, l):\n nodes = list(range(self.noffset, self.noffset + self.n0[l]))\n distmatrix = self._get_distances(nodes, nodes)\n full_graph = Graph.Full(self.n0[l])\n factor = 1e5 # since small weights lead to MST problems\n weights = [factor * distmatrix[i,j] for (i,j) in full_graph.get_edgelist()]\n G = full_graph.spanning_tree(weights).as_undirected()\n return [self._s((i+self.noffset,j+self.noffset)) for (i,j) in G.get_edgelist()]\n\n\n def _get_locations(self, l, offset, _m, init=False):\n m = int(_m)\n poss = np.zeros((m,2))\n for i in range(offset, m):\n poss[i,:] = pos = self._get_coords(self.sampling, self.centre, self.boundaries)\n self.lat.append(pos[0])\n self.lon.append(pos[1])\n # update earlier rtree spatial indices:\n for l2 in range(l):\n self.cumrtree[l2].insert(i, (pos[0],pos[1],pos[0],pos[1]))\n if not init: # otherwise en bulk (below)\n nearest = list(self.cumrtree[l].nearest((pos[0],pos[1],pos[0],pos[1]),1))[0] if m > 0 else 
None # query before adding!\n self.levrtree[l].insert(i, (pos[0],pos[1],pos[0],pos[1]))\n self.cumrtree[l].insert(i, (pos[0],pos[1],pos[0],pos[1])) \n# self._update_distance(offset, m, m)\n if init: # bulk insert: # TODO: CAUTION: must only be used at initialization of level!\n # set up additional rtree spatial indices:\n def f():\n for i in range(offset, m):\n yield (i, (poss[i,0],poss[i,1],poss[i,0],poss[i,1]),None)\n self.levrtree[l] = lrt = rtree(f())\n self.cumrtree[l] = crt = rtree(f()) # sadly, rtrees cannot be cloned yet\n else:\n return nearest\n\n\n def _update_counters(self, level, nodes=0, edges=0):\n self.added_nodes[level] += nodes\n self.noffset += nodes\n self.added_edges[level] += edges\n\n\n def _update_graphs(self, level, nodes=[], edges=[], weights=[], delete_edges=False):\n if delete_edges:\n eid = self.levgraph[level].get_eids(edges)\n self.levgraph[level].delete_edges(eid)\n\n for l in range(level + 1):\n eid = self.cumgraph[l].get_eids(edges)\n self.cumgraph[l].delete_edges(eid)\n\n self._update_counters(level, edges=-len(edges))\n else:\n if nodes:\n # PS: this is not necessary, as all Graphs are created with size totaln.\n # otherwise, the difference between index and name is going to cause many problems\n # self.levgraph[level].add_vertices(nodes)\n # for l in range(level + 1):\n # self.cumgraph[l].add_vertices(nodes)\n\n self.levnodes[level].extend(nodes)\n for l in range(level + 1):\n self.cumnodes[l].extend(nodes)\n\n if edges:\n if not weights:\n weights = [self._get_distances([i],[j])[0,0] for (i,j) in edges]\n\n for idx, (i, j) in enumerate(edges):\n # level graphs do not contain links between levels,\n #if self.lev[i] == self.lev[j]:\n self.levgraph[level].add_edge(i, j, weight=weights[idx])\n\n for l in range(level + 1):\n self.cumgraph[l].add_edge(i, j, weight=weights[idx])\n\n self._update_counters(level, nodes=len(nodes), edges=len(edges))\n\n\n def _s(self, tuple):\n if tuple[0] < tuple[1]:\n return tuple\n else:\n return (tuple[1], tuple[0])\n\n\n#######################################################################################################################\n#######################################################################################################################\n#######################################################################################################################\n\n\ndef calc(name=\"test\", debug=False, layer_plots=True):\n\n #np.random.seed(0)\n\n # initialise algorithm\n g = RpgAlgorithm(L=3)\n assert(isinstance(g, RpgAlgorithm))\n\n # for detailed output set \n g.debug = debug\n\n# branching = np.array([6084.,84.,2.])\n\n # set desired parameters and perform algorithm\n# g.set_params(n=[100,50,9850],\n# n0=[100,50,25],\n# w=[.945,.05,.005],\n# p=[0,.1,.3],\n# q=[0, .075, .075],\n# r=[0., 0.75, 1.5],\n# s=[.2, .05, .0],\n# u=[0.,.05,.1],\n# gamma=0.95\n# )\n\n g.set_params(n=np.array([500,250,49250], dtype=int)/50,\n n0=np.array([500,250,100], dtype=int)/50,\n w=[.945,.05,.005],\n p=[0,.1,.3],\n q=[0, .075, .075],\n r=[0., 0.75, 1.5],\n s=[.2, .05, .0],\n u=[0.,.05,.1],\n beta=0.95\n )\n # use predefined locations ...\n # g.setup_locations(sampling=\"data\", locations=np.random.random([g.n, 2]))\n\n g.prepare()\n for l in range(g.L):\n g.initialise(l)\n g.grow(l)\n\n print(g)\n print(np.array(np.triu(np.tensordot(np.array(g.w), np.array(g.n), axes=0)), dtype=np.int))\n\n # if layer_plots:\n # for b in range(g.L):\n # F = g.levgraph[b].copy()\n # elist = np.array(F.get_edgelist())\n # F.es['level'] = 
map(lambda (a, b): min(g.lev[a], g.lev[b]), elist)\n # F.vs[\"level\"] = g.lev\n # F.vs[\"lat\"] = g.lat\n # F.vs[\"lon\"] = g.lon\n # F.vs[\"density\"] = g.density\n # plot(G=F, name=\"output_layer\"+str(b+1), groups=True)\n # del F\n\n G = g.cumgraph[0].copy()\n elist = np.array(G.get_edgelist())\n G.es['level'] = [min(g.lev[a_b[0]], g.lev[a_b[1]]) for a_b in elist]\n G.vs[\"level\"] = g.lev\n G.vs[\"lat\"] = g.lat\n G.vs[\"lon\"] = g.lon\n G.vs[\"density\"] = g.density\n\n G.write_pickle(name)\n G.write_graphml(name + \".graphml\")\n\n return G\n\n\ndef plot(G=None, name=\"output\", groups=False):\n\n if G is None:\n G = Graph.Read_Pickle(name)\n\n cols = {0: \"grey\", 1: \"blue\", 2: \"red\"}\n weights = {0: 1, 1: 1.5, 2: 2}\n sizes = weights\n\n G.vs['color'] = [cols[y] for y in G.vs[\"level\"]]\n G.es['color'] = [cols[y] for y in G.es[\"level\"]]\n G.es['width'] = [10. * weights[y] for y in G.es[\"level\"]]\n G.vs['size'] = [20. * sizes[y] for y in G.vs[\"level\"]]\n\n\n print((\"connected graph:\", G.is_connected()))\n\n from igraph import plot, Layout\n l = [(xy[0], xy[1]) for xy in np.array([G.vs[\"lat\"], G.vs[\"lon\"]]).T]\n\n w = 100 * np.sqrt(G.vcount())\n if groups:\n comp = G.clusters()\n sort = np.argsort([len(c) for c in comp])[::-1]\n comp = [comp[i] for i in sort]\n cmap = np.tile([\"grey\", \"blue\", \"red\", \"yellow\"], 3)\n group_markers = []\n\n print((\"components:\", len(comp)))\n for i, c in enumerate(comp):\n if i >= len(cmap):\n break\n print((i, len(c), cmap[i]))\n group_markers.append((c, cmap[i]))\n\n plot(G, name + \".pdf\",\n bbox=(w, w),\n layout=Layout(coords=l),\n vertex_order=np.argsort(G.vs[\"level\"]),\n mark_groups=group_markers\n )\n else:\n plot(G, name + \".pdf\",\n bbox=(w, w),\n layout=Layout(coords=l),\n vertex_order=np.argsort(G.vs[\"level\"])\n )\n # plot(G, name + \".png\",\n # bbox=(w, w),\n # layout=Layout(coords=l),\n # vertex_order=np.argsort(G.vs[\"level\"])\n # )\n\ndef collect_data(G=None, name=\"test\"):\n import pandas as pd\n\n if G is None:\n G = Graph.Read_Pickle(name)\n assert isinstance(G, Graph)\n\n df_edges = pd.DataFrame({\"length\": G.es[\"weight\"],\n \"loglength\": np.log10(G.es[\"weight\"]),\n \"level\": G.es[\"level\"]})\n\n # print \"aspl\", G.average_path_length(), \"transitivity\", G.transitivity_undirected()\n\n df_nodes = pd.DataFrame({\"level\": G.vs[\"level\"],\n \"degree\": G.vs.degree(),\n \"clust\": G.transitivity_local_undirected(),\n \"betw\": 2. 
* np.array(G.betweenness()) / (G.vcount() * (G.vcount() - 1.)),\n \"density\": G.vs[\"density\"],\n })\n\n return df_nodes, df_edges\n\ndef hist(df_nodes=None, df_edges=None, name=\"test\"):\n import pandas as pd\n import matplotlib.pyplot as plt\n\n if df_nodes is None:\n df_nodes = pd.read_pickle(name+\".nodedata\")\n assert isinstance(df_nodes, pd.DataFrame)\n\n if df_edges is None:\n df_edges = pd.read_pickle(name+\".edgedata\")\n assert isinstance(df_edges, pd.DataFrame)\n\n df_edges.pivot(columns=\"level\").loglength.plot(kind=\"hist\", bins=40, stacked=True, log=True, grid=True)\n plt.xlabel(r\"$\\log_{10}$ length\")\n plt.savefig(name + \"_loglength_dist.pdf\")\n\n plt.figure()\n for i in range(3):\n no = np.where(df_nodes.level==i)[0]\n plt.plot(df_nodes.degree[no]+0.5*np.random.random(size=no.size),df_nodes.density[no],\"b.\",alpha=0.2*(i+1),ms=8*(i+1),color={0:\"grey\",1:\"blue\",\n 2:\"red\"}[i])\n plt.xlabel(\"degree\")\n plt.ylabel(\"density\")\n plt.savefig(name + \"_degree_vs_density.pdf\")\n\n df_nodes = df_nodes.pivot(columns=\"level\")\n\n df_nodes.degree.plot(kind=\"hist\", bins=40, stacked=True, log=True, grid=True)\n plt.xlabel(\"degree\")\n plt.savefig(name + \"_degree_dist.pdf\")\n\n df_nodes.clust.plot(kind=\"hist\", bins=40, stacked=True, log=True, grid=True)\n plt.xlabel(\"local transitivity\")\n plt.savefig(name + \"_clust_dist.pdf\")\n\n df_nodes.betw.plot(kind=\"hist\", bins=40, stacked=True, log=True, grid=True)\n plt.xlabel(\"shortest path betweenness\")\n plt.savefig(name + \"_betw_dist.pdf\")\n\n\nif __name__ == \"__main__\":\n name = \"test\"\n g = calc(name=name)\n plot(g, name=name)\n\n\n\n\n # from pandas import concat, read_pickle, merge\n # import time as t\n # name = \"ilg\"\n\n # nodes = []\n # edges = []\n # for sample in range(50):\n # s = t.time()\n # G = calc(name=name, debug=False)\n # print t.time() - s\n # dfn, dfe = collect_data(G=G, name=name)\n # nodes.append(dfn)\n # edges.append(dfe)\n #\n # #plot(G, groups=False, name=name)\n # df_nodes = concat(nodes)\n # df_edges = concat(edges)\n #\n # df_edges.to_pickle(name + \".edgedata\")\n # df_nodes.to_pickle(name + \".nodedata\")\n #\n # hist(df_nodes, df_edges, name=name)\n\n # hist(name=name)\n\n\n\n\n", "sub_path": "src/rpgm_neonet.py", "file_name": "rpgm_neonet.py", "file_ext": "py", "file_size_in_byte": 25957, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.sum", "line_number": 93, "usage_type": "call"}, {"api_name": "igraph.Graph", "line_number": 103, "usage_type": "call"}, {"api_name": "igraph.Graph", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.random.random", 
"line_number": 204, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 204, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 205, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 254, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 255, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 256, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 260, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 262, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 263, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 264, "usage_type": "attribute"}, {"api_name": "numpy.c_", "line_number": 274, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 275, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 275, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 276, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 276, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 293, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 336, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 339, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 370, "usage_type": "call"}, {"api_name": "igraph.Graph.Full", "line_number": 411, "usage_type": "call"}, {"api_name": "igraph.Graph", "line_number": 411, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 420, "usage_type": "call"}, {"api_name": "rtree.index.Index", "line_number": 438, "usage_type": "call"}, {"api_name": "rtree.index.Index", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 524, "usage_type": "call"}, {"api_name": "numpy.array", 
"line_number": 525, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 543, "usage_type": "call"}, {"api_name": "numpy.triu", "line_number": 543, "usage_type": "call"}, {"api_name": "numpy.tensordot", "line_number": 543, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 543, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 558, "usage_type": "call"}, {"api_name": "igraph.Graph.Read_Pickle", "line_number": 574, "usage_type": "call"}, {"api_name": "igraph.Graph", "line_number": 574, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 589, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 591, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 594, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 596, "usage_type": "call"}, {"api_name": "igraph.plot", "line_number": 606, "usage_type": "call"}, {"api_name": "igraph.Layout", "line_number": 608, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 609, "usage_type": "call"}, {"api_name": "igraph.plot", "line_number": 613, "usage_type": "call"}, {"api_name": "igraph.Layout", "line_number": 615, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 616, "usage_type": "call"}, {"api_name": "igraph.Graph.Read_Pickle", "line_number": 628, "usage_type": "call"}, {"api_name": "igraph.Graph", "line_number": 628, "usage_type": "name"}, {"api_name": "igraph.Graph", "line_number": 629, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 631, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 632, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 637, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 640, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 651, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 652, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 655, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 656, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 659, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 659, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 660, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 660, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 662, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 662, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 664, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 665, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 665, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 665, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 665, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 667, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 667, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 668, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 668, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 669, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 669, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 674, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 674, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 675, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 675, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 678, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 678, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 679, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 679, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 682, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 682, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 683, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 683, "usage_type": "name"}, {"api_name": "igraph.plot", "line_number": 689, "usage_type": "call"}]} +{"seq_id": "353889319", "text": "import pandas as pd\nimport numpy as np\nimport numpy.linalg as la\nimport scipy as sp\nimport scipy.io as sio\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\ndef load_matlab_data(filename):\n dd = sio.loadmat(filename)\n \n x = dd.get('x')\n y = dd.get('y')\n \n return x, y\n\ndef train_test_val_split(x, y, train_f=0.7, test_f=0.15, val_f=0.15,\n seed=None):\n \"\"\"DOCSTRING!!!\n \n Hopefully this is deprecated soon in favor of cross validation\n \n Parameters\n x, y : numpy.ndarray\n Input and output data, respectively\n train_f, test_f, val_f : float\n Fractions of the data to put in each respective set. This function\n will normalize them for you\n \n Returns\n -------\n tuple of numpy.ndarray\n x and y for the training, testing, and validation set respectively\n \"\"\"\n # Get length of data\n rows, cols = x.shape\n\n # Generate randomized array of row numbers\n rand_ind = np.random.permutation(rows)\n\n # Find the index row with the max and min in each column\n min_indices = [np.argmin(x[:,i]) for i in range(cols)]\n max_indices = [np.argmax(x[:,i]) for i in range(cols)]\n # Filter non-unique values\n minmax_indices = min_indices + max_indices\n minmax_indices = list(set(minmax_indices))\n\n # Convert the train, test, val fractions into numbers\n # Take floor of each of those numbers\n train_len = np.floor(train_f * rows).astype(int)\n test_len = np.floor(test_f * rows).astype(int)\n val_len = np.floor(val_f * rows).astype(int)\n\n # Split array of row numbers into train_ix, test_ix, val_ix\n val_ixs = list(rand_ind[:val_len])\n test_ixs = list(rand_ind[val_len:val_len + test_len])\n train_ixs = list(rand_ind[val_len + test_len:])\n\n # Add the max and min indices to training set\n train_ixs.extend(minmax_indices)\n\n # Slice up x and y\n x_train = x[train_ixs]\n y_train = y[train_ixs]\n\n x_test = x[test_ixs]\n y_test = y[test_ixs]\n\n x_val = x[val_ixs]\n y_val = y[val_ixs]\n\n return x_train, y_train, x_test, y_test, x_val, y_val\n\n \ndef rmse(y_true, y_false):\n return np.sqrt(np.sum((y_true - y_false) ** 2) / y_true.flatten().shape[0])\n", "sub_path": "ne579/Homework/hw05/utilities.py", "file_name": "utilities.py", "file_ext": "py", "file_size_in_byte": 2218, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "scipy.io.loadmat", "line_number": 10, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.random.permutation", "line_number": 39, "usage_type": 
"call"}, {"api_name": "numpy.random", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.argmin", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "106316379", "text": "import argparse\nimport json\nfrom pandas import Timestamp, Timedelta # Use pandas time objects for nanosecond precision\nimport os\n\n\nparser = argparse.ArgumentParser(description=\"Convert TIMEUTC, PVT and HPPOSLLH into accurate GPS coordinates\")\nparser.add_argument(\"file\", help=\"ubx JSONL file\")\nparser.add_argument(\"-low\", help=\"Force to use low precision location\", action=\"store_true\")\n\n\nUBLOX_LATLON_SCALE = 1e-7\nUBLOX_LATLON_HP_SCALE = 1e-2\nUBLOX_ACC_SCALE = 1e-3\nMM_TO_METERS = 1e-3\n\n\ndef extractTimestamp(raw):\n if raw.get(\"valid\", {}).get(\"validTime\") or raw.get(\"valid\", {}).get(\"validUTC\"):\n # nano can be negative, so add it via Timedelta\n ts = Timestamp(\n year = raw[\"year\"],\n month = raw[\"month\"],\n day = raw[\"day\"],\n hour = raw[\"hour\"],\n minute = raw[\"min\"],\n second = raw[\"sec\"]\n ) + Timedelta(value = raw[\"nano\"], unit = \"nanoseconds\")\n else:\n ts = None\n return ts\n\n\ndef extractHighPrecisionLocation(raw):\n # Precise latitude in deg * 1e-7 = lat + (latHp * 1e-2)\n lat = (raw[\"lat\"] + raw[\"latHp\"] * UBLOX_LATLON_HP_SCALE) * UBLOX_LATLON_SCALE\n lon = (raw[\"lon\"] + raw[\"lonHp\"] * UBLOX_LATLON_HP_SCALE) * UBLOX_LATLON_SCALE\n # Precise height in mm = hMSL + (hMSLHp * 0.1)\n alt = (raw[\"hMSL\"] + raw[\"hMSLHp\"] * 0.1) * MM_TO_METERS\n acc = raw[\"hAcc\"] * MM_TO_METERS\n accV = raw[\"vAcc\"] * MM_TO_METERS\n return (lat, lon, alt, acc, accV)\n\n\ndef extractLocation(raw):\n lat = raw[\"lat\"] * UBLOX_LATLON_SCALE\n lon = raw[\"lon\"] * UBLOX_LATLON_SCALE\n alt = raw[\"hMSL\"] * MM_TO_METERS\n acc = raw[\"hAcc\"] * MM_TO_METERS\n accV = raw[\"vAcc\"] * MM_TO_METERS\n return (lat, lon, alt, acc, accV)\n\n\ndef buildMeasurement(group, useHighPrecision=True, itow=None):\n ts = None\n\n if group.get(\"PVT\"):\n ts = extractTimestamp(group.get(\"PVT\"))\n elif group.get(\"TIMEUTC\"):\n ts = extractTimestamp(group.get(\"TIMEUTC\"))\n if not ts:\n if itow: print(\"Valid timestamp missing, skipping iTOW={}\".format(itow))\n return None\n\n acc = None\n if group.get(\"HPPOSLLH\"):\n if useHighPrecision:\n lat, lon, alt, acc, accV = extractHighPrecisionLocation(group.get(\"HPPOSLLH\"))\n else:\n lat, lon, alt, acc, accV = extractLocation(group.get(\"HPPOSLLH\"))\n elif not useHighPrecision and group.get(\"PVT\"):\n lat, lon, alt, acc, accV = extractLocation(group.get(\"PVT\"))\n if not acc:\n if itow: print(\"Valid location missing, skipping iTOW={}\".format(itow))\n return None\n\n measurement = {\n \"time\": ts.timestamp(),\n \"lat\": lat,\n \"lon\": lon,\n \"altitude\": alt,\n \"accuracy\": acc,\n \"verticalAccuracy\": accV\n }\n\n pvt = group.get(\"PVT\")\n if pvt:\n measurement[\"velocity\"] = {\n \"north\": pvt[\"velN\"] * MM_TO_METERS,\n \"east\": pvt[\"velE\"] * MM_TO_METERS,\n \"down\": pvt[\"velD\"] * MM_TO_METERS,\n }\n measurement[\"groundSpeed\"] = pvt[\"gSpeed\"] * MM_TO_METERS\n measurement[\"speedAccuracy\"] = 
pvt[\"sAcc\"] * MM_TO_METERS\n return measurement\n\n\ndef run(args):\n inputFile = os.path.splitext(args.file)\n outputFile = inputFile[0] + \"-gps\" + inputFile[1]\n print(\"Starting processing\")\n\n # Group data based on iTOW, they belong to same navigation solution\n useHighPrecision = False\n itowGroups = {}\n with open(args.file) as f:\n lines = f.readlines()\n for line in lines:\n msg = json.loads(line)\n msgType = msg[\"type\"]\n if msgType == \"HPPOSLLH\": useHighPrecision = True\n if msgType == \"PVT\" or msgType == \"HPPOSLLH\" or msgType == \"TIMEUTC\":\n payload = msg[\"payload\"]\n itow = payload[\"iTOW\"]\n group = itowGroups.get(itow)\n if not group:\n group = {\"iTOW\": itow}\n itowGroups[itow] = group\n group[msgType] = payload\n\n if args.low:\n useHighPrecision = False\n\n if useHighPrecision:\n print(\"Found HPPOSLLH events, only using them for high precision. PVT events excluded. Use -low flag to disable.\")\n else:\n print(\"Using low precision mode\")\n\n # Convert groups into GPS coordinates\n coordinates = []\n for itow in itowGroups:\n group = itowGroups[itow]\n measurement = buildMeasurement(group, useHighPrecision, itow)\n if measurement:\n coordinates.append(measurement)\n\n coordinates.sort(key=lambda x: x[\"time\"])\n\n with open(outputFile, \"w\") as writer:\n for coord in coordinates:\n writer.write(json.dumps(coord) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n run(args)\n print(\"Done!\")\n", "sub_path": "gps_converter.py", "file_name": "gps_converter.py", "file_ext": "py", "file_size_in_byte": 4789, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 109, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "638570234", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"Module that harvests approval data from Ilias.\n\nHow you should do it:\n\n.. 
code-block:: python\n\n import pysimplesoap\n\n # WARNING: Ilias requires us to conform to the given parameter order\n # For that reason, we build XML data by hand rather using the\n # automatic kwargs conversion of pysimplesoap\n\n cl = pysimplesoap.client.SoapClient(\n location='https://ilias.studium.kit.edu/webservice/soap/server.php',\n namespace='urn:ilUserAdministration'\n )\n\n def get_sid(client):\n element = SimpleXMLElement(\n ''\n 'produktiv'\n '????????'\n '????????'\n ''\n )\n response = client.call('login', element)\n return str(response('sid'))\n\n sid = get_sid(cl)\n ref_id=????????\n\n def get_data(client, sid, ref_id):\n element = SimpleXMLElement(\n ''\n '{}'\n '{}'\n 'false'\n ''.format(sid, ref_id)\n )\n response = client.call('getTestResults', element)\n reutrn SimpleXMLElement(str(response('xml')))\n\n data = get_data(cl, sid, ref_id)\n for row in data.rows.row:\n mtr = row.column[4]\n # sadly, this is where it stops working, because all relevant data (all scores) are zero,\n # thanks to the shitty, untested Ilias SOAP interface.\n\n\nSo instead, we are emulating the requests that a browser would do to download the data.\n\"\"\"\n\nimport csv\n\nimport requests\n\nfrom spz import app, db, models\n\n\n# headers that will be used for all Ilias HTTPS requests\nheaders = {\n 'Accept-Language': 'en-US,en;q=0.8,de-DE;q=0.5,de;q=0.3',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:44.0) Gecko/20100101 Firefox/44.0',\n}\n\n\ndef extract_text(s, token_begin, token_end):\n \"\"\"Extract text from string that is located between 2 tokens.\"\"\"\n l0 = len(token_begin)\n idx0 = s.find(token_begin)\n assert idx0 > -1\n\n begin = idx0 + l0\n\n idx1 = s.find(token_end, begin)\n assert idx1 > -1\n\n end = idx1\n return s[begin:end]\n\n\ndef download_data():\n \"\"\"Download relevant CSV data from Ilias.\n\n Returns line->byte iterator.\"\"\"\n # get inital cookies\n url0 = '{}login.php'.format(app.config['ILIAS_URL'])\n r0 = requests.get(\n url0,\n params={\n 'client_id': 'produktiv',\n 'cmd': 'force_login',\n 'lang': 'de',\n 'target': '',\n },\n headers=headers\n )\n assert r0.status_code == 200\n cookies = r0.cookies\n\n # login\n url1 = '{}ilias.php'.format(app.config['ILIAS_URL'])\n r1 = requests.post(\n url1,\n params={\n 'baseClass': 'ilStartUpGUI',\n 'client_id': 'produktiv',\n 'cmd': 'post',\n 'cmdClass': 'ilstartupgui',\n 'cmdNode': 'fp',\n 'lang': 'de',\n 'rtoken': '',\n },\n data={\n 'cmd[showLogin]': 'Anmelden',\n 'password': app.config['ILIAS_PASSWORD'],\n 'username': app.config['ILIAS_USERNAME'],\n },\n cookies=cookies,\n headers=headers\n )\n assert r1.status_code == 200\n cookies = r1.history[0].cookies # use cookies of first request\n\n # get bunch of metadata\n url2 = '{}ilias.php'.format(app.config['ILIAS_URL'])\n r2 = requests.get(\n url2,\n params={\n 'baseClass': 'ilObjTestGUI',\n 'cmd': 'outEvaluation',\n 'cmdClass': 'iltestevaluationgui',\n 'cmdNode': 'a2:a5',\n 'ref_id': app.config['ILIAS_REFID'],\n },\n cookies=cookies,\n headers=headers\n )\n assert r2.status_code == 200\n text2 = r2.text\n # these tokens occur multiple times but seem to be unique\n rtoken = extract_text(text2, \"rtoken=\", \"&\")\n active_id = extract_text(text2, \"active_id=\", \"&\")\n\n # prepare form / virtual table so we get all the information we need\n # without this step, the \"Matrikelnummer\" won't be present.\n # WARNING: this change is stateful (i.e. 
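`extract_text` in the record above is a small scraping helper: it returns the substring between two marker tokens and asserts both markers are present. It is what pulls `rtoken` and `active_id` out of the Ilias HTML responses. A usage sketch against a made-up markup fragment, assuming `extract_text` is in scope:

```python
# Made-up fragment of the kind of markup the harvester scrapes.
html = 'href="ilias.php?rtoken=0a1b2c&active_id=4711&cmd=post"'

rtoken = extract_text(html, "rtoken=", "&")        # text between the tokens
active_id = extract_text(html, "active_id=", "&")
assert rtoken == "0a1b2c" and active_id == "4711"
```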
Ilias keeps track of it, not the URI / cookie / session storage / ...)\n url3 = '{}ilias.php'.format(app.config['ILIAS_URL'])\n r3 = requests.post(\n url3,\n params={\n 'baseClass': 'ilObjTestGUI',\n 'cmd': 'post',\n 'cmdClass': 'iltestevaluationgui',\n 'cmdNode': 'a2:a5',\n 'fallbackCmd': 'outEvaluation',\n 'ref_id': app.config['ILIAS_REFID'],\n 'rtoken': rtoken,\n },\n data={\n 'cmd[outEvaluation]': 'Aktualisieren',\n 'course': '',\n 'group': '',\n 'name': '',\n 'tblfshtst_eval_all': '1',\n 'tblfstst_eval_all[]': 'matriculation',\n 'tst_eval_all_table_nav': 'name:asc:0',\n 'tst_eval_all_table_nav1': 'name:asc:0',\n 'tst_eval_all_table_nav2': 'name:asc:0',\n },\n cookies=cookies,\n headers=headers\n )\n assert r3.status_code == 200\n\n # download file\n url4 = '{}ilias.php'.format(app.config['ILIAS_URL'])\n r4 = requests.post(\n url4,\n params={\n 'active_id': active_id,\n 'baseClass': 'ilObjTestGUI',\n 'cmd': 'post',\n 'cmdClass': 'iltestevaluationgui',\n 'cmdNode': 'a2:a5',\n 'fallbackCmd': 'exportEvaluation',\n 'ref_id': app.config['ILIAS_REFID'],\n 'rtoken': rtoken,\n },\n data={\n 'cmd[exportEvaluation]': 'Export',\n 'export_type': 'csv',\n },\n cookies=cookies,\n headers=headers\n )\n assert r4.status_code == 200\n # don't use r2.text here, it's very very slow!\n it = r4.iter_lines()\n\n # logout\n url5 = '{}logout.php'.format(app.config['ILIAS_URL'])\n r5 = requests.post(\n url5,\n params={\n 'lang': 'de',\n },\n cookies=cookies,\n headers=headers\n )\n assert r5.status_code == 200\n\n return it\n\n\ndef parse_data(it):\n \"\"\"Parse CSV string from Ilias into list of Approval objects.\"\"\"\n # do lazy string conversion for performance reasons\n # WARNING: Ilias emits invalid Unicode characters!\n fp = (line.decode('utf-8', 'replace') for line in it)\n reader = csv.reader(fp, dialect=csv.excel, delimiter=';')\n\n # get first row and do sanity check\n # don't be to smart here so we get errors in case the Ilias output changes.\n # when this happens we are going to double check the parser\n head = next(reader)\n assert head[0] == 'Name'\n assert head[1] == 'Benutzername'\n assert head[2] == 'Matrikelnummer'\n assert head[3] == 'Testergebnis in Punkten'\n assert head[4] == 'Maximal erreichbare Punktezahl'\n assert head[5] == 'Testergebnis als Note'\n # don't care about the rest\n\n # parse file\n approvals = []\n for idx, row in enumerate(reader, 1):\n # for some reason, Ilias emits a new header before every line,\n # so we only parse every second line.\n if idx % 2 == 0:\n continue\n\n # ==========================\n # == 1. get right columns ==\n # ==========================\n s_user = row[1]\n s_idnumber = row[2]\n s_points_got = row[3]\n s_points_max = row[4]\n\n # some lines might be empty/invalid\n if not ((bool(s_user) or bool(s_idnumber)) and bool(s_points_got) and bool(s_points_max)):\n continue\n\n # ==========================\n # == 2. parse data ==\n # ==========================\n # normalized tag\n # prefer idnumber but fallback to username (e.g. for staff members)\n tag = s_idnumber.strip().lower() or s_user.strip().lower()\n\n # do not catch the exception here, so we know when something goes wrong\n points_got = int(s_points_got)\n points_max = int(s_points_max)\n\n # last sanity check\n if not (tag and points_got >= 0 and points_max > 0):\n continue\n\n # ==========================\n # == 3. 
create objects ==\n # ==========================\n # limit rating to [0, 100] because for some reason,\n # points_got might be bigger than points_max\n rating = max(\n 0,\n min(\n int(100 * points_got / points_max),\n 100\n )\n )\n\n # finally add approval to output list\n approvals.append(\n models.Approval(\n tag=tag,\n percent=rating,\n sticky=False,\n priority=False\n )\n )\n\n return approvals\n\n\ndef refresh():\n \"\"\"Overwrite approvals in DB with newest Ilias data.\"\"\"\n it = download_data()\n approvals = parse_data(it)\n\n # start transaction rollback area\n try:\n # remove all non-sticky entries from DB\n models.Approval.query.filter(models.Approval.sticky == False).delete() # NOQA\n\n # add all new approvals\n db.session.add_all(approvals)\n db.session.commit()\n except Exception:\n db.session.rollback()\n raise\n", "sub_path": "images/spz/spz/iliasharvester.py", "file_name": "iliasharvester.py", "file_ext": "py", "file_size_in_byte": 9339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "spz.app.config", "line_number": 89, "usage_type": "attribute"}, {"api_name": "spz.app", "line_number": 89, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 90, "usage_type": "call"}, {"api_name": "spz.app.config", "line_number": 104, "usage_type": "attribute"}, {"api_name": "spz.app", "line_number": 104, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 105, "usage_type": "call"}, {"api_name": "spz.app.config", "line_number": 118, "usage_type": "attribute"}, {"api_name": "spz.app", "line_number": 118, "usage_type": "name"}, {"api_name": "spz.app.config", "line_number": 119, "usage_type": "attribute"}, {"api_name": "spz.app", "line_number": 119, "usage_type": "name"}, {"api_name": "spz.app.config", "line_number": 128, "usage_type": "attribute"}, {"api_name": "spz.app", "line_number": 128, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 129, "usage_type": "call"}, {"api_name": "spz.app.config", "line_number": 136, "usage_type": "attribute"}, {"api_name": "spz.app", "line_number": 136, "usage_type": "name"}, {"api_name": "spz.app.config", "line_number": 150, "usage_type": "attribute"}, {"api_name": "spz.app", "line_number": 150, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 151, "usage_type": "call"}, {"api_name": "spz.app.config", "line_number": 159, "usage_type": "attribute"}, {"api_name": "spz.app", "line_number": 159, "usage_type": "name"}, {"api_name": "spz.app.config", "line_number": 179, "usage_type": "attribute"}, {"api_name": "spz.app", "line_number": 179, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 180, "usage_type": "call"}, {"api_name": "spz.app.config", "line_number": 189, "usage_type": "attribute"}, {"api_name": "spz.app", "line_number": 189, "usage_type": "name"}, {"api_name": "spz.app.config", "line_number": 204, "usage_type": "attribute"}, {"api_name": "spz.app", "line_number": 204, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 205, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 223, "usage_type": "call"}, {"api_name": "csv.excel", "line_number": 223, "usage_type": "attribute"}, {"api_name": "spz.models.Approval", "line_number": 287, "usage_type": "call"}, {"api_name": "spz.models", "line_number": 287, "usage_type": "name"}, {"api_name": "spz.models.Approval.query.filter", "line_number": 306, "usage_type": "call"}, {"api_name": "spz.models.Approval", "line_number": 
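The `parse_data` routine above guards against Ilias reporting more points than the maximum by clamping the computed percentage into [0, 100]. The same clamp, pulled out as a standalone helper for illustration (the helper name is mine, not the module's):

```python
def clamp_rating(points_got: int, points_max: int) -> int:
    # Same arithmetic as parse_data: integer percentage bounded to [0, 100].
    return max(0, min(int(100 * points_got / points_max), 100))

assert clamp_rating(18, 24) == 75
assert clamp_rating(30, 24) == 100  # Ilias sometimes over-reports; capped
assert clamp_rating(0, 24) == 0
```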
306, "usage_type": "attribute"}, {"api_name": "spz.models", "line_number": 306, "usage_type": "name"}, {"api_name": "spz.db.session.add_all", "line_number": 309, "usage_type": "call"}, {"api_name": "spz.db.session", "line_number": 309, "usage_type": "attribute"}, {"api_name": "spz.db", "line_number": 309, "usage_type": "name"}, {"api_name": "spz.db.session.commit", "line_number": 310, "usage_type": "call"}, {"api_name": "spz.db.session", "line_number": 310, "usage_type": "attribute"}, {"api_name": "spz.db", "line_number": 310, "usage_type": "name"}, {"api_name": "spz.db.session.rollback", "line_number": 312, "usage_type": "call"}, {"api_name": "spz.db.session", "line_number": 312, "usage_type": "attribute"}, {"api_name": "spz.db", "line_number": 312, "usage_type": "name"}]} +{"seq_id": "161952545", "text": "import cv2\nimport socket\nimport base64\nimport numpy as np\n\"\"\"\nScript considers the IP to be IPv4 only, else change AF_IFNET -> AF_INET6\n\"\"\"\nIP_SERVER = '' #fill server IP\nPORT_SERVER = 953\nTIMEOUT_SOCKET = 10\nSIZE_PACKAGE = 4096\n\nIMAGE_HEIGHT = 480\nIMAGE_WIDTH = 640\nCOLOR_PIXEL = 3 # RGB\n\n\nif __name__ == '__main__':\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n connection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n connection.settimeout(TIMEOUT_SOCKET)\n connection.connect((IP_SERVER, PORT_SERVER))\n\n while True:\n try:\n fileDescriptor = connection.makefile(mode='rb')\n result = fileDescriptor.readline()\n fileDescriptor.close()\n result = base64.b64decode(result)\n\n frame = np.frombuffer(result, dtype=np.uint8)\n frame_matrix = np.array(frame)\n try:\n frame_matrix = np.reshape(frame_matrix, (IMAGE_HEIGHT, IMAGE_WIDTH,\n COLOR_PIXEL))\n except:\n pass\n cv2.imshow('Window title', frame_matrix)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n except Exception as e:\n print(\"[Error] \" + str(e))\n\n connection.close()\n", "sub_path": "tcp-streaming-multicast-client-webcam.py", "file_name": "tcp-streaming-multicast-client-webcam.py", "file_ext": "py", "file_size_in_byte": 1296, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "socket.socket", "line_number": 19, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 19, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 19, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 20, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 20, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "533522954", "text": "from django import forms\nfrom django.contrib.auth.models import User\nfrom .models import Submission\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass SubmissionForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Submission\n\t\tfields = ['state', 'bucket', 'bidsdir', 'jobdir', 'creds_file', 'datasetname', 'modality', 'slice_timing', 'data_file', 
'upload_data_or_not']\n\t\tlabels={\n\t\t\t'state':_('Analysis Level'),\n\t\t\t'bucket':_('S3 Bucket Name'),\n\t\t\t'bidsdir':_('BIDS Directory'),\n\t\t\t'jobdir':_('Unique Token'),\n\t\t\t'creds_file':_('AWS Credentials File'),\n\t\t\t'datasetname':_('Dataset Name'),\n\t\t\t'modality':_('Modality'),\n\t\t\t'slice_timing':_('Slice Timing Method'),\n\t\t\t'data_file':_('Local Data (Zipped)'),\n\t\t\t'upload_data_or_not':_('Upload Local Data?')\n\t\t}\n\t\thelp_texts={\n\t\t\t'state':_('Level of analysis to perform'),\n\t\t\t'bucket':_('Name of S3 bucket where data lives'),\n\t\t\t'bidsdir':_('Path on S3 bucket where data lives'),\n\t\t\t'jobdir':_('Unique identifier for job submission to facilitate later queries'),\n\t\t\t'creds_file':_('File containing user credentials for AWS services'),\n\t\t\t'datasetname':_('Dataset name (group analysis)'),\n\t\t\t'modality':_('Modality of data'),\n\t\t\t'slice_timing':_('The method in which slices were acquired.'),\n\t\t\t'data_file':_('Local data to be uploaded to S3 bucket(stored in BIDS formatting and zipped).'),\n\t\t\t'upload_data_or_not':_('Whether or not the local data should be uploaded to S3.')\n\t\t}\n", "sub_path": "fngs/analyze/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1428, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.forms.ModelForm", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": "models.Submission", "line_number": 9, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 12, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 13, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 14, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 15, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 16, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 17, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 18, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 19, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 20, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 21, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 24, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 25, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 26, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 27, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 28, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 29, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 30, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 31, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 32, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 33, "usage_type": 
"call"}]} +{"seq_id": "486971472", "text": "import xml.etree.ElementTree as ET\nimport sys\n\nclass Module():\n def __init__(self, name, baseAddress):\n self.name = name\n self.baseAddress = baseAddress\n self.registerGroupsAndUngroupedRegisters = []\n\nclass RegisterGroup():\n def __init__(self, name, baseAddress):\n self.name = name\n self.baseAddress = baseAddress\n self.registers = []\n\nclass Register():\n def __init__(self, name, baseAddress, description, resetValue):\n self.name = name\n self.baseAddress = baseAddress\n self.description = description\n self.resetValue = resetValue\n self.bitfields = []\n\n def createCDefines(self, moduleName, regGroupName, cppName, value):\n \"\"\"Create C header entries for registers\"\"\"\n headerString = '#define ' + 'REG' + '_' + \\\n moduleName + '_'\n headerString += regGroupName + '_' if regGroupName else ''\n headerString += self.name + '_' + \\\n cppName + ' ' + \\\n hex(value) + '\\n'\n return headerString\n\nclass Bitfield():\n def __init__(self, name, offset, size, description):\n self.name = name\n self.offset = offset\n self.size = size\n self.description = description\n\n def createCDefines(self, moduleName, regGroupName, regName, cppName, value):\n \"\"\"Create C header entries for bitfields\"\"\"\n headerString = '#define ' + 'BF' + '_' + \\\n moduleName + '_'\n headerString += regGroupName + '_' if regGroupName else ''\n headerString += regName + '_' + \\\n self.name + '_' + \\\n cppName + ' ' + \\\n hex(value) + '\\n'\n return headerString\n\nif __name__ == \"__main__\":\n inputXmlFilename = sys.argv[1]\n outputHeaderFilename = sys.argv[2]\n\n namespace = \"{http://www.spiritconsortium.org/XMLSchema/SPIRIT/1.5}\"\n tree = ET.parse(inputXmlFilename)\n root = tree.getroot()\n\n memoryMaps = root.find(namespace + 'memoryMaps')\n memoryMap = memoryMaps.find(namespace + 'memoryMap')\n addressBlock = memoryMap.find(namespace + 'addressBlock')\n\n registerFiles = addressBlock.findall(namespace + 'registerFile')\n name = addressBlock.find(namespace + 'name').text.upper()\n baseAddress = int(addressBlock.find(namespace + 'baseAddress').text, 0)\n module = Module(name, baseAddress)\n\n def extractRegistersAndBitfields(parentList, registers):\n for register in registers:\n name = register.find(namespace + 'name').text.upper()\n baseAddress = int(register.find(namespace + 'addressOffset').text, 0)\n resetNode = register.find(namespace + 'reset')\n description = \"\" if register.find(namespace + 'description') is None else register.find(namespace + 'description').text\n resetValue = int(resetNode.find(namespace + 'value').text, 0)\n resetMask = int(resetNode.find(namespace + 'mask').text, 0)\n resetValue = resetValue & resetMask\n reg = Register(name, baseAddress, description, resetValue)\n parentList.append(reg)\n fields = register.findall(namespace + 'field')\n for field in fields:\n name = field.find(namespace + 'name').text.upper()\n description = \"\" if field.find(namespace + 'description') is None else field.find(namespace + 'description').text\n bitOffset = int(field.find(namespace + 'bitOffset').text, 0)\n bitWidth = int(field.find(namespace + 'bitWidth').text, 0)\n bitfield = Bitfield(name, bitOffset, bitWidth, description)\n reg.bitfields.append(bitfield)\n\n # Extract grouped registers\n for registerFile in registerFiles:\n name = registerFile.find(namespace + 'name').text.upper()\n baseAddress = int(registerFile.find(namespace + 'addressOffset').text, 0)\n regGroup = RegisterGroup(name, baseAddress)\n 
module.registerGroupsAndUngroupedRegisters.append(regGroup)\n registers = registerFile.findall(namespace + 'register')\n extractRegistersAndBitfields(regGroup.registers, registers)\n\n # Extract ungrouped registers\n unGroupedRegisters = addressBlock.findall(namespace + 'register')\n extractRegistersAndBitfields(module.registerGroupsAndUngroupedRegisters, unGroupedRegisters)\n\n # Sort registers and bitfields by address and offset\n module.registerGroupsAndUngroupedRegisters.sort(key=lambda x: x.baseAddress, reverse=False)\n for regGroup in module.registerGroupsAndUngroupedRegisters:\n regGroup.registers.sort(key=lambda x: x.baseAddress, reverse=False)\n for register in regGroup.registers:\n register.bitfields.sort(key=lambda x: x.offset, reverse=False)\n\n def createCDefines(module, regGroup, register):\n \"\"\"Create C defines for the registers and bitfields\n\n Arguments: module - top-level module\n regGroup - register group (optional)\n register - register object \"\"\"\n baseAddress = module.baseAddress + regGroup.baseAddress if regGroup is not None else 0 \n headerString = ''\n if register.description:\n headerString += '/* ' + \\\n module.name + '_'\n headerString += regGroup.name + '_' if regGroup is not None else ''\n headerString += register.name + ': ' + \\\n register.description + ' */\\n'\n headerString += register.createCDefines(module.name, regGroup.name, 'ADDRESS', register.baseAddress + baseAddress)\n headerString += register.createCDefines(module.name, regGroup.name, 'RESET', register.resetValue)\n headerString += '\\n'\n for bitfield in register.bitfields:\n headerString += '/* ' + \\\n module.name + '_'\n headerString += regGroup.name + '_' if regGroup is not None else ''\n headerString += register.name + '_' + \\\n bitfield.name + ': ' + \\\n bitfield.description + ' */\\n'\n headerString += bitfield.createCDefines(module.name, regGroup.name, register.name, 'OFFSET', bitfield.offset)\n mask = ((1 << bitfield.size) - 1) << bitfield.offset\n headerString += bitfield.createCDefines(module.name, regGroup.name, register.name, 'MASK', mask)\n headerString += '\\n'\n headerString += '\\n'\n return headerString\n\n # Create header file string\n headerString = \"\"\n for regGroup in module.registerGroupsAndUngroupedRegisters:\n try:\n for register in regGroup.registers:\n headerString += createCDefines(module, regGroup, register)\n except Exception as ex:\n register = regGroup\n headerString += createCDefines(module, None, register)\n headerString += '\\n'\n\n f = open(outputHeaderFilename, 'w')\n f.write(headerString)\n", "sub_path": "dp_sumit/dp_sumit/bb_ge/components/ibuild/etc/extract_header_from_reg_xml.py", "file_name": "extract_header_from_reg_xml.py", "file_ext": "py", "file_size_in_byte": 7000, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.argv", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 57, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "58076433", "text": "from django.db import models\nfrom django.utils.text import slugify\n\nfrom categories.managers import CategoryManager\n\n\nclass Category(models.Model):\n name = models.CharField('Name', max_length=190, db_index=True)\n slug = models.SlugField('Slug', max_length=190, blank=True, db_index=True, unique=True)\n sort_id = 
models.PositiveIntegerField('Ordering', blank=True, default=2147483647)\n parent = models.ForeignKey('self', blank=True, null=True, related_name='children', on_delete=models.CASCADE)\n\n objects = CategoryManager()\n\n class Meta:\n db_table = 'categories'\n verbose_name_plural = 'Categories'\n ordering = ('sort_id', 'name')\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, allow_unicode=True)\n super().save(*args, **kwargs)\n", "sub_path": "src/categories/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 888, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.db.models.Model", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 11, "usage_type": "attribute"}, {"api_name": "categories.managers.CategoryManager", "line_number": 13, "usage_type": "call"}, {"api_name": "django.utils.text.slugify", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "151751940", "text": "#\n# Copyright 2015 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport itertools\nimport logging\n\nfrom cliff import show\n\nfrom ironicclient.common import utils\nfrom ironicclient.v1 import resource_fields as res_fields\n\n\nclass CreateBaremetalPort(show.ShowOne):\n \"\"\"Create a new port\"\"\"\n\n log = logging.getLogger(__name__ + \".CreateBaremetalPort\")\n\n def get_parser(self, prog_name):\n parser = super(CreateBaremetalPort, self).get_parser(prog_name)\n\n parser.add_argument(\n 'address',\n metavar='
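The `Category` model above fills its slug from the name on first save via `slugify(..., allow_unicode=True)` and leaves a manually set slug alone on later saves. `slugify` itself needs no configured Django settings, so its behavior can be checked in isolation:

```python
from django.utils.text import slugify

# Runs standalone: slugify does not require django.setup().
assert slugify("Power Tools & Parts", allow_unicode=True) == "power-tools-parts"
assert slugify("Café", allow_unicode=True) == "café"  # unicode kept intact
```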
',\n help='MAC address for this port.')\n parser.add_argument(\n '--node',\n dest='node_uuid',\n metavar='',\n required=True,\n help='UUID of the node that this port belongs to.')\n parser.add_argument(\n '--extra',\n metavar=\"\",\n action='append',\n help=\"Record arbitrary key/value metadata. \"\n \"Can be specified multiple times.\")\n parser.add_argument(\n '-l', '--local-link-connection',\n metavar=\"\",\n action='append',\n help=\"Key/value metadata describing Local link connection \"\n \"information. Valid keys are switch_info, switch_id, \"\n \"port_id. Can be specified multiple times.\")\n parser.add_argument(\n '--pxe-enabled',\n metavar='',\n help='Indicates whether this Port should be used when '\n 'PXE booting this Node.')\n\n return parser\n\n def take_action(self, parsed_args):\n self.log.debug(\"take_action(%s)\" % parsed_args)\n baremetal_client = self.app.client_manager.baremetal\n\n field_list = ['address', 'extra', 'node_uuid', 'pxe_enabled',\n 'local_link_connection']\n fields = dict((k, v) for (k, v) in vars(parsed_args).items()\n if k in field_list and v is not None)\n fields = utils.args_array_to_dict(fields, 'extra')\n fields = utils.args_array_to_dict(fields, 'local_link_connection')\n port = baremetal_client.port.create(**fields)\n\n data = dict([(f, getattr(port, f, '')) for f in\n res_fields.PORT_DETAILED_RESOURCE.fields])\n\n return self.dict2columns(data)\n\n\nclass ShowBaremetalPort(show.ShowOne):\n \"\"\"Show baremetal port details.\"\"\"\n\n log = logging.getLogger(__name__ + \".ShowBaremetalPort\")\n\n def get_parser(self, prog_name):\n parser = super(ShowBaremetalPort, self).get_parser(prog_name)\n parser.add_argument(\n \"port\",\n metavar=\"\",\n help=\"UUID of the port (or MAC address if --address is specified).\"\n )\n parser.add_argument(\n '--address',\n dest='address',\n action='store_true',\n default=False,\n help=' is the MAC address (instead of the UUID) of the port.')\n parser.add_argument(\n '--fields',\n nargs='+',\n dest='fields',\n metavar='',\n action='append',\n choices=res_fields.PORT_DETAILED_RESOURCE.fields,\n default=[],\n help=\"One or more port fields. 
Only these fields will be fetched \"\n \"from the server.\")\n return parser\n\n def take_action(self, parsed_args):\n self.log.debug(\"take_action(%s)\", parsed_args)\n\n baremetal_client = self.app.client_manager.baremetal\n fields = list(itertools.chain.from_iterable(parsed_args.fields))\n fields = fields if fields else None\n\n if parsed_args.address:\n port = baremetal_client.port.get_by_address(\n parsed_args.port, fields=fields)._info\n else:\n port = baremetal_client.port.get(\n parsed_args.port, fields=fields)._info\n\n port.pop(\"links\", None)\n return zip(*sorted(port.items()))\n", "sub_path": "ironicclient/osc/v1/baremetal_port.py", "file_name": "baremetal_port.py", "file_ext": "py", "file_size_in_byte": 4520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "cliff.show.ShowOne", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cliff.show", "line_number": 26, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "ironicclient.common.utils.args_array_to_dict", "line_number": 73, "usage_type": "call"}, {"api_name": "ironicclient.common.utils", "line_number": 73, "usage_type": "name"}, {"api_name": "ironicclient.common.utils.args_array_to_dict", "line_number": 74, "usage_type": "call"}, {"api_name": "ironicclient.common.utils", "line_number": 74, "usage_type": "name"}, {"api_name": "ironicclient.v1.resource_fields.PORT_DETAILED_RESOURCE", "line_number": 78, "usage_type": "attribute"}, {"api_name": "ironicclient.v1.resource_fields", "line_number": 78, "usage_type": "name"}, {"api_name": "cliff.show.ShowOne", "line_number": 83, "usage_type": "attribute"}, {"api_name": "cliff.show", "line_number": 83, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 86, "usage_type": "call"}, {"api_name": "ironicclient.v1.resource_fields.PORT_DETAILED_RESOURCE", "line_number": 107, "usage_type": "attribute"}, {"api_name": "ironicclient.v1.resource_fields", "line_number": 107, "usage_type": "name"}, {"api_name": "itertools.chain.from_iterable", "line_number": 117, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 117, "usage_type": "attribute"}]} +{"seq_id": "518794114", "text": "### THIS MODULE RENDERS THE TEMPLATES FROM THE JINJA2 FILES\n### AND PACKAGES THEM INTO A LIST OF LISTS. IT ONLY LOOKS AT THE \n### SELECTED INDEXES (INIITIALIZE.ELEMENT) OF THE NODE_OBJECT. \n### THE CONFIGURATIONS ARE STORED IN THE GLOBAL VARIABLE CALL \n### INITIALIZE.CONFIGURATION.\n\nfrom jinja2 import Environment, FileSystemLoader\nfrom ciscoconfparse import CiscoConfParse\nfrom collections import Counter\nfrom multithread import multithread_engine\nfrom get_property import get_template_directory\nfrom get_property import get_updated_list\nfrom get_property import get_syntax\nfrom get_property import get_sorted_juniper_template_list \nimport re\nimport initialize\n\ndef auditdiff_engine(template_list,node_object,auditcreeper,output,remediation):\n\n\tredirect = [] \n\tcommand = [] \n\t### RENDERED_CONFIG IS TO ACCOMODATE JUNIPER PLATFORM BY APPENDING A 'LOAD REPLACE TERMINAL' TO GET THE DIFF OUTPUT\n\trendered_config = []\n\trendered_config.append('load replace terminal')\n\tedit_list = []\n\tno_diff = 0\n\n\t### PUSH_CONFIGS IS A LIST OF THE FINAL CONFIGS TO BE PUSHED\n#\tpush_configs = []\n\n\t### INDEX_POSITION IS THE INDEX OF ALL THE MATCHED FILTER_CONFIG AGAINST THE BACKUP_CONFIGS. 
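`CreateBaremetalPort` above accepts `--extra` and `--local-link-connection` multiple times and folds the collected `key=value` strings into dicts with `utils.args_array_to_dict`. A minimal stand-in for that folding, to show the shape of the transformation (an illustrative re-implementation, not ironicclient's code):

```python
def fold_key_values(pairs):
    # Illustrative only: turn ["k1=v1", "k2=v2"] into {"k1": "v1", "k2": "v2"}.
    out = {}
    for item in pairs or []:
        key, _, value = item.partition("=")
        out[key] = value
    return out

assert fold_key_values(["switch_id=aa:bb:cc", "port_id=Eth1/1"]) == {
    "switch_id": "aa:bb:cc", "port_id": "Eth1/1"}
```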
THE INDEX IS COMING FROM THE BACKUP_CONFIG\n\tindex_position = 0\n\n\t### NODE_INDEX KEEPS TRACK OF THE INDEX IN INITIALIZE.NTW_DEVICE. IF REMEDIATION IS NOT REQUIRED (CONFIGS MATCHES TEMPLATE), THEN THE NODE IS POPPED OFF\n\t### INITIALIZE.NTW_DEVICE AND NOTHING IS CHANGED ON THAT DEVICE\n\tnode_index = 0 \n\n\t### AUDIT_FILTER_RE IS THE REGULAR EXPRESSION TO FILTER OUT THE AUDIT FILTER IN EVERY TEMPLATE\n\tAUDIT_FILTER_RE = r\"\\[.*\\]\"\n\n\t### TEMPLATE_LIST_COPY TAKE A COPY OF THE CURRENT TEMPLATE_LIST\n\ttemplate_list_original = template_list[:]\n\ttemplate_list_copy = template_list\n\n\tif(auditcreeper):\n\t\ttemplate_list = template_list_copy[0]\n\n#\tprint \"TEMPLATE_LIST: {}\".format(template_list)\n\n\t### THIS SECTION OF CODE WILL GATHER ALL RENDERED CONFIGS FIRST AS IT'S REQUIRED FOR ALL PLATFORMS (CISCO & JUNIPER)\n\t### JUNIPER DOES NOT REQUIRE BACKUP-CONFIGS IN ORDER TO BE DIFFED SO INSTEAD IT WILL JUST PUSH (PUSH_CFGS) THE TEMPLATE AND PERFORM THE DIFF ON THE DEVICE ITSELF.\n\t### CISCO WILL REQUIRE BACKUP-CONFIGS (GET_CONFIG)\n\tfor index in initialize.element:\n\n\t\tif(node_object[index]['platform'] == 'juniper'):\n\n\t\t\t### THIS WILL RETURN A SORTED JUNIPER TEMPLATE LIST BASED ON JUNIPER'S 'SHOW CONFIGURATION' OUTPUT\n\t\t\ttemplate_list = get_sorted_juniper_template_list(template_list)\n#\t\t\tprint(\"TEMPLATE_LIST FIRST PHASE: {}\".format(template_list))\n\n\t\tfor template in template_list:\n\n\t\t\t### THIS SECTION OF CODE WILL PROCESS THE TEMPLATE AND OUTPUT TO A *.CONF FILE\n\t\t\tdirectory = get_template_directory(node_object[index]['platform'],node_object[index]['opersys'],node_object[index]['type'])\n\t\t\tenv = Environment(loader=FileSystemLoader(\"{}\".format(directory)))\n\t\t\tbaseline = env.get_template(template)\n\t\t\tf = open(\"/rendered-configs/{}.{}\".format(node_object[index]['hostname'],template.split('.')[0]) + \".conf\", \"w\") \n\n\t\t\t### GENERATING TEMPLATE BASED ON NODE OBJECT\n\t\t\tconfig = baseline.render(nodes = node_object[index])\n\n\t\t\tf.write(config) \n\t\t\tf.close \n\t\t\tif(node_object[index]['platform'] == 'cisco'):\n\n\t\t\t\t### THIS SECTION OF CODE WILL OPEN THE RENDERED-CONFIG *.CONF FILE AND STORE IN RENDERED_CONFIG AS A LIST\n\t\t\t\tf = open(\"/rendered-configs/{}.{}\".format(node_object[index]['hostname'],template.split('.')[0]) + \".conf\", \"r\")\n\t\t\t\tinit_config = f.readlines()\n\t\t\t\t### RENDERED_CONFIG IS A LIST OF ALL THE CONFIGS THAT WAS RENDERED FROM THE TEMPLATES (SOURCE OF TRUTH)\n\n\t\t\tif(node_object[index]['platform'] == 'juniper'):\n\t\n\t\t\t\t### THIS SECTION OF CODE WILL OPEN THE RENDERED-CONFIG *.CONF FILE AND STORE IN RENDERED_CONFIG AS A LIST\n\t\t\t\tf = open(\"/rendered-configs/{}.{}\".format(node_object[index]['hostname'],template.split('.')[0]) + \".conf\", \"r\")\n\t\t\t\tinit_config = f.readlines()\n\t\t\t\t### RENDERED_CONFIG IS A LIST OF ALL THE CONFIGS THAT WAS RENDERED FROM THE TEMPLATES (SOURCE OF TRUTH)\n\t\n\t\t\t\tfor config_line in init_config:\n\t\t\t\t\tstrip_config = config_line.strip('\\n')\n\t\t\t\t\t### THIS WILL REMOVE ANY LINES THAT ARE EMPTY OR HAS A '!' 
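The audit engine above renders one Jinja2 template per node: it resolves a per-platform template directory, builds an `Environment` with a `FileSystemLoader`, and calls `render(nodes=...)` with that node's dict. The same flow, made self-contained with a `DictLoader` so it runs without a template tree on disk (the template name and node fields are invented):

```python
from jinja2 import Environment, DictLoader

env = Environment(loader=DictLoader({
    "ntp.j2": "ntp server {{ nodes.ntp_server }} source {{ nodes.source_intf }}\n",
}))
node = {"ntp_server": "10.0.0.1", "source_intf": "Loopback0"}  # invented node
config = env.get_template("ntp.j2").render(nodes=node)
assert config == "ntp server 10.0.0.1 source Loopback0\n"
```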
MARK\n\t\t\t\t\tif(strip_config == '' or strip_config == \"!\"):\n\t\t\t\t\t\tcontinue\t\n\t\t\t\t\telse:\n\t\t\t\t\t\trendered_config.append(strip_config)\t\n\t\n\t\t\t\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\t\t\t\tprint (\"RENDERED CONFIG: {}\".format(rendered_config))\n\n\t\ttemplate_list = get_updated_list(template_list_copy)\n\n\t\tif(node_object[index]['platform'] == 'cisco'):\n\t\t\tredirect.append('get_config')\n\t\t\tcommand.append([''])\n\t\t### JUNIPER DEVICES WILL RECEIVE A DIFFERENT REDIRECT THAN CISCO PLATFORM\n\t\t### THREE ADDITIONAL COMMANDS ARE APPENEDED AT THE END, ^D, SHOW | COMPARE AND ROLLBACK 0\n\t\t### ALL TEMPLATES MATCHING ARE EXECUTED AT ONCE PER DEVICE\n\t\telif(node_object[index]['platform'] == 'juniper'):\n\t\t\tredirect.append('get_diff')\n\t\t\trendered_config.append('\\x04')\n\t\t\trendered_config.append('show | compare')\n\t\t\trendered_config.append('rollback 0')\n\t\t\tcommand.append(rendered_config)\n\n\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\tprint\"REDIRECT: {}\".format(redirect)\n\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\tprint\"COMMAND: {}\".format(command)\n#\tprint(\"[+] [COMPUTING DIFF. STANDBY...]\")\n\tmultithread_engine(initialize.ntw_device,redirect,command)\n\t\n\t### RESETING TEMPLATE_LIST TO ORIGINAL LIST\n\n\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\tprint(\"ORIGINAL_LIST: {}\".format(template_list_original))\n\ttemplate_list = template_list_original\n\n\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\tprint(\"TEMPLATE_LIST: {}\".format(template_list))\n\n\t### REINITIALIZING TEMPLATE_LIST TO THE ORIGINAL LIST OF TEMPLATES\n\tif(auditcreeper):\n\t\ttemplate_list = template_list_original[0]\n\n\t### THIS FOR LOOP WILL LOOP THROUGH ALL THE MATCHED ELEMENTS FROM THE USER SEARCH AND AUDIT ON SPECIFIC TEMPLATE OR IF NO ARGUMENT IS GIVEN, ALL TEMPLATES\n\t\n\tfor index in initialize.element:\n\n\t\t### NODE_CONFIG IS THE FINALIZED CONFIG TO PUSH TO THE NODE FOR REMEDIATION\n\t\tnode_configs = []\n\t\tntw_device_pop = True \n\t\t### TEMPLATE_NAME IS SET TO TRUE IN ORDER TO PRINT OUT THE TEMPLATE HEADING WHEN RECURSING\n\t\ttemplate_name = True\n\n\t\tif(not remediation):\n\t\t\tprint(\"Only in the device: -\")\n\t\t\tprint(\"Only in the generated config: +\")\n\t\t\tprint (\"{}\".format(node_object[index]['hostname']))\n\n\t\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n\t\ttemplate_list_juniper = template_list[:]\n\t\tif(node_object[index]['platform'] == 'juniper'):\n\n\t\t\t### THIS WILL RETURN A SORTED JUNIPER TEMPLATE LIST BASED ON JUNIPER'S 'SHOW CONFIGURATION' OUTPUT\n\t\t\ttemplate_list = get_sorted_juniper_template_list(template_list)\n\n\t\t### THIS WILL LOOP THROUGH ALL THE TEMPLATES SPECIFIED FOR THE PARTICULAR HOST IN NODES.YAML\n\t\tfor template in template_list:\n\n\t\t\t### THIS SECTION IS FOR CISCO SYSTEMS PLATFORM ###\n\t\t\tif(node_object[index]['platform'] == 'cisco'):\n\n\t\t\t\tcisco_audit_diff(node_object,index,template,AUDIT_FILTER_RE,output,remediation)\n\t\n\t\t\t### THIS SECTION IS FOR JUNIPER NETWORKS PLATFORM ###\n\t\t\tif(node_object[index]['platform'] == 'juniper'):\n\n\t\t\t\tdirectory = get_template_directory(node_object[index]['platform'],node_object[index]['opersys'],node_object[index]['type'])\n\t\t\t\t### THIS SECTION OF CODE WILL OPEN DIFF-CONFIG *.CONF FILE AND STORE IN DIFF_CONFIG AS A LIST\n\t\t\t\tf = open(\"/diff-configs/{}\".format(node_object[index]['hostname']) + 
\".conf\", \"r\")\n\t\t\t\tinit_config = f.readlines()\n\t\t\t\t### DIFF_CONFIG ARE THE DIFFERENTIAL CONFIGS GENERATED BY THE /DIFF-CONFIGS/*.CONF FILE \n\t\t\t\tdiff_config = []\n\t\n\t\t\t\tfor config_line in init_config:\n\t\t\t\t\tstrip_config = config_line.strip('\\n')\n\t\t\t\t\tdiff_config.append(strip_config)\t\n\t\n\t\t\t\t###UN-COMMENT THE BELOW PRINT STATEMENT FOR DEBUGING PURPOSES\n#\t\t\t\tprint (\"DIFF CONFIG: {}\".format(diff_config))\n\n\t\t\t\tRE = re.compile(r'\\[edit\\s({})'.format(template.split('.')[0]))\n\t\t\t\tsearch = list(filter(RE.match,diff_config))\n\n\t\t\t\tif(len(search) == 0):\n\t\t\t\t\tprint(\"{}{} (none)\".format(directory,template))\n\t\t\t\t\tprint('')\n\t\t\t\t\tno_diff = no_diff + 1\n\t\t\t\t\tif(no_diff == len(template_list)):\n\t\t\t\t\t\tbreak\n\t\t\t\t\tif(len(template_list) > 1):\t\n\t\t\t\t\t\tjuniper_audit_diff(directory,template,template_list,diff_config,edit_list,search)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t### THIS FIRST SECTION WILL FIND ALL THE INDEXES WITH THE '[edit