diff --git "a/1213.jsonl" "b/1213.jsonl" new file mode 100644--- /dev/null +++ "b/1213.jsonl" @@ -0,0 +1,432 @@ +{"seq_id": "57179236", "text": "import json\n\nfrom flask_sitemap import Sitemap\nfrom invenio_pidstore.models import PersistentIdentifier\nfrom oarepo_records_draft import current_drafts\n\nfrom sample.config import SAMPLE_DRAFT_PID_TYPE\nfrom sample.record import SampleDraftRecord\nfrom sample.search import SampleRecordsSearch\n\n\ndef test_sitemap_extension(app, client, db, community):\n\n assert isinstance(app.extensions['sitemap'], Sitemap)\n\n url = \"https://localhost:5000/sitemap.xml\"\n response = client.get(url)\n print(response.data)\n\n assert response.status_code == 200\n\n app.config['SITEMAP_MAX_URL_COUNT'] = 1\n for pid in range(1, 100):\n client.post('/cesnet/records/draft/',\n data=json.dumps({\"title\": \"title\", \"_primary_community\": \"cesnet\", \"state\": \"published\"}),\n content_type='application/json')\n record_pid = PersistentIdentifier.query.filter_by(pid_type=SAMPLE_DRAFT_PID_TYPE, pid_value=pid).one()\n record = SampleDraftRecord.get_record(id_=record_pid.object_uuid)\n current_drafts.publish(record=record, record_pid=record_pid, require_valid=False)\n\n url = \"https://localhost:5000/sitemap.xml\"\n response = client.get(url)\n print(response.data)\n assert response.status_code == 200\n for x in SampleRecordsSearch(index=\"sample-sample-v1.0.0\").source(includes=['id', '_primary_community']):\n print(x)\n\n assert 'http://localhost:5000/sitemap1.xml' in str(response.data)\n assert 'http://localhost:5000/sitemap10.xml' in str(response.data)\n assert 'http://localhost:5000/sitemap11.xml' not in str(response.data)", "sub_path": "tests/test_sitemap_extension.py", "file_name": "test_sitemap_extension.py", "file_ext": "py", "file_size_in_byte": 1598, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "flask_sitemap.Sitemap", "line_number": 14, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 25, "usage_type": "call"}, {"api_name": "invenio_pidstore.models.PersistentIdentifier.query.filter_by", "line_number": 27, "usage_type": "call"}, {"api_name": "invenio_pidstore.models.PersistentIdentifier.query", "line_number": 27, "usage_type": "attribute"}, {"api_name": "invenio_pidstore.models.PersistentIdentifier", "line_number": 27, "usage_type": "name"}, {"api_name": "sample.config.SAMPLE_DRAFT_PID_TYPE", "line_number": 27, "usage_type": "name"}, {"api_name": "sample.record.SampleDraftRecord.get_record", "line_number": 28, "usage_type": "call"}, {"api_name": "sample.record.SampleDraftRecord", "line_number": 28, "usage_type": "name"}, {"api_name": "oarepo_records_draft.current_drafts.publish", "line_number": 29, "usage_type": "call"}, {"api_name": "oarepo_records_draft.current_drafts", "line_number": 29, "usage_type": "name"}, {"api_name": "sample.search.SampleRecordsSearch", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "502725481", "text": "'''\nAuthor: Tianyi Lu\nDescription: Olympics API\nDate: 2021-02-05 13:49:43\nLastEditors: Tianyi Lu\nLastEditTime: 2021-02-11 05:37:24\n'''\n\nimport flask\nimport sys\nimport argparse\nimport json\nfrom olympics import Olympics\n\napp = flask.Flask(__name__)\nolympics_object = Olympics()\n\n@app.route('/')\ndef hello():\n return 'Hello, this is Olympics API implemented by Sky Lu.'\n\n@app.route('/games')\ndef games():\n games_list = olympics_object.get_games()\n key_list = ['id', 'year', 'season', 'city']\n result_list = 
list_to_dict(key_list, games_list)\n    return json.dumps(result_list)\n\n@app.route('/nocs')\ndef nocs():\n    nocs_list = olympics_object.get_nocs()\n    key_list = ['abbreviation','name']\n    result_list = list_to_dict(key_list, nocs_list)\n    return json.dumps(result_list)\n\n@app.route('/medalists/games/<games_id>')\ndef medalists(games_id):\n    noc = flask.request.args.get('noc') or None\n    key_list = ['athlete_id', 'athlete_name', 'athlete_sex', 'event', 'medal']\n    medalists_list = olympics_object.get_medalists_by_games(games_id, noc)\n    result_list = list_to_dict(key_list, medalists_list)\n    return json.dumps(result_list)\n\ndef list_to_dict(key_list, value_list):\n    return [{x[0]:x[1] for x in zip(key_list, row)} for row in value_list]\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser('A sample Flask application/API')\n    parser.add_argument('host', help='the host on which this application is running')\n    parser.add_argument('port', type=int, help='the port on which this application is listening')\n    arguments = parser.parse_args()\n    app.run(host=arguments.host, port=arguments.port, debug=True)", "sub_path": "olympics-api/olympics-api.py", "file_name": "olympics-api.py", "file_ext": "py", "file_size_in_byte": 1650, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "olympics.Olympics", "line_number": 16, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 27, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "505620610", "text": "from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom todo.models import *\nfrom django.db import transaction\n\n\n# Create your views here.\n\nclass CreateBucket(APIView):\n\n    def get(self, request):\n        buckets = []\n        bucket = Bucket.objects.all()\n        for buck in bucket:\n            data = {\"id\":buck.id, \"name\": buck.name, \"create_time\": buck.datetime.time(), \"create_date\": buck.datetime.date()}\n            buckets.append(data)\n        return Response({\"message\": \"successful\", \"data\": buckets})\n\n    @transaction.atomic()\n    def post(self, request):\n        name = request.POST.get(\"name\")\n        sid = transaction.savepoint()\n        if name != \"\":\n            bucket = Bucket(name=name)\n            bucket.save()\n            transaction.savepoint_commit(sid)\n            return Response({\"message\": \"New Bucket Created with {name}\".format(name=name)})\n        else:\n            transaction.savepoint_rollback(sid)\n            return Response({\"message\": \"error\"})\n\n\nclass CreateTodo(APIView):\n\n    def get(self, request):\n        bucket_id = request.GET.get(\"bucket_id\")\n        todos = []\n        if Bucket.objects.filter(id=bucket_id).exists():\n            todo = Todo.objects.filter(bucket=bucket_id)\n            for to in todo:\n                data = {\"bucket_name\": to.bucket.name, \"bucket_id\": to.bucket.id, \"todo_name\": to.name,\n                        \"date\": to.target.date(), \"time\": to.target.time(), \"todo_id\": to.id, \"details\": to.details}\n                todos.append(data)\n            return Response({\"message\": \"successful\", \"data\": todos})\n        else:\n            return Response({\"message\": \"unsuccessful\", \"data\": \"Invalid parameter\"})\n\n    def post(self, request):\n        bucket_id = request.POST.get(\"bucket_id\")\n        name = request.POST.get(\"name\")\n        date_time = request.POST.get(\"date_time\")\n        details = request.POST.get(\"details\")\n        sid = transaction.savepoint()\n        if bucket_id!=\"\":\n            if Bucket.objects.filter(id=bucket_id).exists():\n                if name != \"\" or date_time!=\"\":\n                    bucket = Bucket.objects.get(id=bucket_id)\n                    todo = Todo(bucket=bucket, name=name, target=date_time, details=details)\n                    todo.save()\n                    transaction.savepoint_commit(sid)\n                    return Response({\"message\": \"New Todo created with {name}\".format(name=name)})\n                else:\n                    transaction.savepoint_rollback(sid)\n                    return Response({\"message\": \"error\"})\n            else:\n                return Response({\"message\": \"Invalid Bucket\"})\n        else:\n            return Response({\"message\": \"Invalid Bucket\"})\n\n    def delete(self, request):\n        bucket_id = request.GET.get(\"bucket_id\")\n        todo_id = request.GET.get(\"todo_id\")\n        if Bucket.objects.filter(id=bucket_id).exists() and Todo.objects.filter(id=todo_id).exists():\n            Todo.objects.filter(bucket=bucket_id, id=todo_id).delete()\n            return Response({\"message\": \"Deleted successfully\"})\n        else:\n            return Response({\"message\": \"unsuccessful\", \"data\": \"Invalid Bucket or todo data\"})\n\n    @transaction.atomic()\n    def put(self, request):\n        sid = transaction.savepoint()\n        bucket_id = request.POST.get(\"bucket_id\")\n        todo_id = request.POST.get(\"todo_id\")\n        name = request.POST.get(\"name\")\n        date_time = request.POST.get(\"date_time\")\n        details = request.POST.get(\"details\")\n        print (bucket_id, name)\n        print (Bucket.objects.filter(id=bucket_id), Todo.objects.filter(id=todo_id))\n        if Bucket.objects.filter(id=bucket_id).exists() and Todo.objects.filter(id=todo_id).exists():\n            todo_obj = Todo.objects.get(bucket=bucket_id, id=todo_id)\n            todo_obj.name = name\n            todo_obj.target = date_time\n            todo_obj.details = details\n            todo_obj.save()\n            transaction.savepoint_commit(sid)\n            return Response({\"message\": \"Successfully edited Todo\"})\n        else:\n            transaction.savepoint_rollback(sid)\n            return Response({\"message\": \"unsuccessful\"})\n\n\n", "sub_path": "todo/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4234, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.transaction.savepoint", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.transaction.savepoint_commit", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 27, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.transaction.savepoint_rollback", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 30, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 34, "usage_type": "name"}, {"api_name": "todo.models", "line_number": 40, "usage_type": "name"}, {"api_name": "todo.models", "line_number": 41, 
"usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 45, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.transaction.savepoint", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 54, "usage_type": "name"}, {"api_name": "todo.models", "line_number": 59, "usage_type": "name"}, {"api_name": "todo.models.save", "line_number": 60, "usage_type": "call"}, {"api_name": "todo.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.transaction.savepoint_commit", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.transaction.savepoint_rollback", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 64, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 65, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 67, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 69, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 76, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.transaction.savepoint", "line_number": 82, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 82, "usage_type": "name"}, {"api_name": "django.db.transaction.savepoint_commit", "line_number": 96, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 96, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 97, "usage_type": "call"}, {"api_name": "django.db.transaction.savepoint_rollback", "line_number": 99, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 99, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 100, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 80, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "219310276", "text": "# coding=utf-8\n\"\"\"Several middleware, for production or debugging purposes\n========================================================\n\n\"\"\"\nfrom __future__ import unicode_literals\nimport codecs\nfrom django.contrib.sessions.backends.base import VALID_KEY_CHARS\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.crypto import get_random_string\n# noinspection PyPackageRequirements\nfrom pipeline.compilers import CompilerBase\nimport base64\nfrom django.conf import settings\nfrom django.contrib import auth\nfrom django.contrib.auth.middleware import RemoteUserMiddleware as BaseRemoteUserMiddleware\nfrom django.contrib.auth.models import Group\n# noinspection PyPackageRequirements\nfrom pipeline.compressors import CompressorBase\nfrom djangofloor.df_pipeline import cssmin\n\n__author__ = 'Matthieu Gallet'\n\n\nclass IEMiddleware(object):\n \"\"\"required for signals tight to a window\n Add a HTTP header for Internet Explorer Compatibility.\n Ensure that IE uses the last version of its display engine.\n \"\"\"\n # noinspection PyMethodMayBeStatic\n def process_request(self, request):\n request.window_key 
= get_random_string(32, VALID_KEY_CHARS)\n\n # noinspection PyUnusedLocal,PyMethodMayBeStatic\n def process_template_response(self, request, response):\n response['X-UA-Compatible'] = 'IE=edge,chrome=1'\n return response\n\n\nclass RemoteUserMiddleware(BaseRemoteUserMiddleware):\n \"\"\"Like :class:`django.contrib.auth.middleware.RemoteUserMiddleware` but:\n\n * can use any header defined by the setting `FLOOR_AUTHENTICATION_HEADER`,\n * add a `df_remote_authenticated` attribute to the request (`True` if the user has been authenticated via the header)\n \"\"\"\n header = settings.FLOOR_AUTHENTICATION_HEADER\n\n def process_request(self, request):\n request.df_remote_authenticated = False\n if request.META['REMOTE_ADDR'] in settings.REVERSE_PROXY_IPS and self.header and self.header in request.META:\n if not request.user.is_authenticated():\n self.original_process_request(request)\n request.df_remote_authenticated = request.user.is_authenticated()\n\n def original_process_request(self, request):\n # AuthenticationMiddleware is required so that request.user exists.\n if not hasattr(request, 'user'):\n raise ImproperlyConfigured(\n \"The Django remote user auth middleware requires the\"\n \" authentication middleware to be installed. Edit your\"\n \" MIDDLEWARE_CLASSES setting to insert\"\n \" 'django.contrib.auth.middleware.AuthenticationMiddleware'\"\n \" before the RemoteUserMiddleware class.\")\n username = request.META.get(self.header)\n if not username or username == '(null)': # special case caused by Apache :-(\n return\n username, sep, domain = username.partition('@')\n # If the user is already authenticated and that user is the user we are\n # getting passed in the headers, then the correct user is already\n # persisted in the session and we don't need to continue.\n if request.user.is_authenticated():\n if request.user.get_username() == self.clean_username(username, request):\n return\n else:\n # An authenticated user is associated with the request, but\n # it does not match the authorized user in the header.\n self._remove_invalid_user(request)\n\n # We are seeing this user for the first time in this session, attempt\n # to authenticate the user.\n user = auth.authenticate(remote_user=username)\n if user:\n # User is valid. 
Set request.user and persist user in the session\n            # by logging the user in.\n            request.user = user\n            auth.login(request, user)\n\n\nclass FakeAuthenticationMiddleware(object):\n    \"\"\" Only for dev/debugging purpose: emulate a user authenticated by the remote proxy.\n\n    Use `settings.FLOOR_FAKE_AUTHENTICATION_USERNAME` to create (if needed) a user and authenticate the request.\n    Only works in `settings.DEBUG` mode and if `settings.FLOOR_FAKE_AUTHENTICATION_USERNAME` is set.\n    \"\"\"\n    group_cache = {}\n\n    # noinspection PyMethodMayBeStatic\n    def process_request(self, request):\n        username = settings.FLOOR_FAKE_AUTHENTICATION_USERNAME\n        if not settings.DEBUG or not username:\n            return\n        user = auth.authenticate(remote_user=username)\n        if user:\n            request.user = user\n            auth.login(request, user)\n            request.df_remote_authenticated = True\n            if settings.FLOOR_FAKE_AUTHENTICATION_GROUPS:\n                for group_name in settings.FLOOR_FAKE_AUTHENTICATION_GROUPS:\n                    if group_name not in self.group_cache:\n                        group, created = Group.objects.get_or_create(name=group_name)\n                        self.group_cache[group_name] = group\n                    else:\n                        group = self.group_cache[group_name]\n                    user.groups.add(group)\n\n\nclass BasicAuthMiddleware(object):\n    \"\"\"Basic HTTP authentication using Django users to check passwords.\n    \"\"\"\n\n    # noinspection PyMethodMayBeStatic\n    def process_request(self, request):\n        if 'HTTP_AUTHORIZATION' in request.META:\n            authentication = request.META['HTTP_AUTHORIZATION']\n            (authmeth, auth_data) = authentication.split(' ', 1)\n            if 'basic' == authmeth.lower():\n                auth_data = base64.b64decode(auth_data.strip()).decode('utf-8')\n                username, password = auth_data.split(':', 1)\n                user = auth.authenticate(username=username, password=password)\n                if user:\n                    request.user = user\n                    auth.login(request, user)\n\n\n# noinspection PyAbstractClass\nclass RCSSMinCompressor(CompressorBase):\n\n    @staticmethod\n    def compress_css(css):\n        return cssmin(css)\n\n\nclass PyScssCompiler(CompilerBase):\n    output_extension = 'css'\n\n    def match_file(self, filename):\n        return filename.endswith('.scss')\n\n    def compile_file(self, infile, outfile, outdated=False, force=False):\n        # noinspection PyPackageRequirements\n        import scss.compiler\n        # lazy import: scss is only required when a file actually gets compiled\n        if not outdated and not force:\n            return # No need to recompile the file\n        result = scss.compiler.compile_file(infile)\n        with codecs.open(outfile, 'w', encoding='utf-8') as fd:\n            fd.write(result)\n", "sub_path": "djangofloor/middleware.py", "file_name": "middleware.py", "file_ext": "py", "file_size_in_byte": 6591, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.utils.crypto.get_random_string", "line_number": 32, "usage_type": "call"}, {"api_name": "django.contrib.sessions.backends.base.VALID_KEY_CHARS", "line_number": 32, "usage_type": "argument"}, {"api_name": "django.contrib.auth.middleware.RemoteUserMiddleware", "line_number": 40, "usage_type": "name"}, {"api_name": "django.conf.settings.FLOOR_AUTHENTICATION_HEADER", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 46, "usage_type": "name"}, {"api_name": "django.conf.settings.REVERSE_PROXY_IPS", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 50, "usage_type": "name"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 58, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 81, 
"usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 81, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 86, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 86, "usage_type": "name"}, {"api_name": "django.conf.settings.FLOOR_FAKE_AUTHENTICATION_USERNAME", "line_number": 99, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 99, "usage_type": "name"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 100, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 100, "usage_type": "name"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 102, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 102, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 105, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 105, "usage_type": "name"}, {"api_name": "django.conf.settings.FLOOR_FAKE_AUTHENTICATION_GROUPS", "line_number": 107, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 107, "usage_type": "name"}, {"api_name": "django.conf.settings.FLOOR_FAKE_AUTHENTICATION_GROUPS", "line_number": 108, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 108, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.objects.get_or_create", "line_number": 110, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 110, "usage_type": "name"}, {"api_name": "base64.b64decode", "line_number": 127, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 129, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 129, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 132, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 132, "usage_type": "name"}, {"api_name": "pipeline.compressors.CompressorBase", "line_number": 136, "usage_type": "name"}, {"api_name": "djangofloor.df_pipeline.cssmin", "line_number": 140, "usage_type": "call"}, {"api_name": "pipeline.compilers.CompilerBase", "line_number": 143, "usage_type": "name"}, {"api_name": "scss.compiler.compiler.compile_file", "line_number": 155, "usage_type": "call"}, {"api_name": "scss.compiler.compiler", "line_number": 155, "usage_type": "attribute"}, {"api_name": "scss.compiler", "line_number": 155, "usage_type": "name"}, {"api_name": "codecs.open", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "140648335", "text": "\"\"\"Test the Thermostat status manager.\"\"\"\nimport asyncio\nfrom random import randint\nfrom unittest import TestCase\nfrom unittest.mock import AsyncMock\n\nfrom pyinsteon.data_types.user_data import UserData\nfrom pyinsteon.managers.thermostat_status_manager import GetThermostatStatus\nfrom pyinsteon.topics import EXTENDED_GET_RESPONSE, EXTENDED_GET_SET\n\nfrom .. 
import set_log_levels\nfrom ..utils import TopicItem, async_case, random_address, send_topics\n\n\nclass TestThermostatStatusManager(TestCase):\n    \"\"\"Test the thermostat status manager.\"\"\"\n\n    def setUp(self) -> None:\n        \"\"\"Set up the tests.\"\"\"\n        set_log_levels(logger_topics=True)\n\n    @async_case\n    async def test_status_commands(self):\n        \"\"\"Test the thermostat status commands.\"\"\"\n        status_received = AsyncMock()\n        set_point_received = AsyncMock()\n\n        def _status_received(\n            day,\n            hour,\n            minute,\n            second,\n            system_mode,\n            fan_mode,\n            cool_set_point,\n            humidity,\n            temperature,\n            cooling,\n            heating,\n            celsius,\n            heat_set_point,\n        ):\n            nonlocal status_received\n            status_received = True\n\n        def _set_point_received(\n            humidity_high,\n            humidity_low,\n            firmware,\n            cool_set_point,\n            heat_set_point,\n            rf_offset,\n        ):\n            \"\"\"Receive set point info.\"\"\"\n            nonlocal set_point_received\n            set_point_received = True\n\n        address = random_address()\n        target = random_address()\n        command = GetThermostatStatus(address=address)\n        command.subscribe_status(_status_received)\n        command.subscribe_set_point(_set_point_received)\n        ack_topic = f\"ack.{address.id}.{EXTENDED_GET_SET}.direct\"\n        dir_ack_topic = f\"{address.id}.{EXTENDED_GET_SET}.direct_ack\"\n        response_topic = f\"{address.id}.{EXTENDED_GET_RESPONSE}.direct\"\n        ud_status_ack = UserData({\"d13\": 0x92, \"d14\": 0x96})\n        ack_status = TopicItem(\n            ack_topic, {\"cmd1\": 0x2E, \"cmd2\": 0x02, \"user_data\": ud_status_ack}, 0.2\n        )\n        dir_ack_status = TopicItem(\n            dir_ack_topic,\n            {\n                \"cmd1\": 0x2E,\n                \"cmd2\": 0x02,\n                \"user_data\": None,\n                \"target\": target,\n                \"hops_left\": 0,\n            },\n            0.1,\n        )\n        user_data_response = UserData(\n            {\n                \"d1\": 0x01,\n                \"d2\": randint(20, 255),\n                \"d3\": randint(20, 255),\n                \"d4\": randint(20, 255),\n                \"d5\": randint(20, 255),\n                \"d6\": randint(20, 255),\n                \"d7\": randint(20, 255),\n                \"d8\": randint(20, 255),\n                \"d9\": randint(20, 255),\n                \"d10\": randint(20, 255),\n                \"d11\": randint(20, 255),\n                \"d12\": randint(20, 255),\n            }\n        )\n        status_response = TopicItem(\n            response_topic,\n            {\n                \"cmd1\": 0x2E,\n                \"cmd2\": 0x02,\n                \"user_data\": user_data_response,\n                \"target\": target,\n                \"hops_left\": 0,\n            },\n            1,\n        )\n\n        ud_setpt_ack = UserData({\"d3\": 0x01, \"d13\": 0x20, \"d14\": 0x0F})\n        ack_setpt = TopicItem(\n            ack_topic, {\"cmd1\": 0x2E, \"cmd2\": 0x00, \"user_data\": ud_setpt_ack}, 0.1\n        )\n        dir_ack_setpt = TopicItem(\n            dir_ack_topic,\n            {\n                \"cmd1\": 0x2E,\n                \"cmd2\": 0x00,\n                \"user_data\": None,\n                \"target\": target,\n                \"hops_left\": 0,\n            },\n            0.1,\n        )\n        ud_setpt_response = UserData(\n            {\n                \"d1\": 0x00,\n                \"d2\": 0x01,\n                \"d3\": 0x01,\n                \"d4\": randint(20, 255),\n                \"d5\": randint(20, 255),\n                \"d6\": randint(20, 255),\n                \"d7\": randint(20, 255),\n                \"d8\": randint(20, 255),\n                \"d9\": randint(20, 255),\n                \"d10\": randint(20, 255),\n                \"d11\": randint(20, 255),\n                \"d12\": randint(20, 255),\n            }\n        )\n        setpt_response = TopicItem(\n            response_topic,\n            {\n                \"cmd1\": 0x2E,\n                \"cmd2\": 0x00,\n                \"user_data\": ud_setpt_response,\n                \"target\": target,\n                \"hops_left\": 0,\n            },\n            0.1,\n        )\n\n        send_topics(\n            [\n                ack_status,\n                dir_ack_status,\n                status_response,\n                ack_setpt,\n                dir_ack_setpt,\n                setpt_response,\n            ]\n        )\n        await command.async_status()\n        await asyncio.sleep(0.1)\n        assert status_received\n        assert set_point_received\n", "sub_path": "tests/test_managers/test_thermostat_status_manager.py", "file_name": "test_thermostat_status_manager.py", "file_ext": "py", "file_size_in_byte": 4976, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "unittest.TestCase", "line_number": 15, "usage_type": "name"}, {"api_name": "unittest.mock.AsyncMock", "line_number": 25, "usage_type": "call"}, {"api_name": "unittest.mock.AsyncMock", "line_number": 26, "usage_type": "name"}, {"api_name": "utils.random_address", "line_number": 58, "usage_type": "call"}, {"api_name": "utils.random_address", "line_number": 59, "usage_type": "call"}, {"api_name": "pyinsteon.managers.thermostat_status_manager.GetThermostatStatus", "line_number": 60, "usage_type": "call"}, {"api_name": "pyinsteon.topics.EXTENDED_GET_SET", "line_number": 63, "usage_type": "name"}, {"api_name": "pyinsteon.topics.EXTENDED_GET_SET", "line_number": 64, "usage_type": "name"}, {"api_name": "pyinsteon.topics.EXTENDED_GET_RESPONSE", "line_number": 65, "usage_type": "name"}, {"api_name": "pyinsteon.data_types.user_data.UserData", "line_number": 66, "usage_type": "call"}, {"api_name": "utils.TopicItem", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.TopicItem", "line_number": 70, "usage_type": "call"}, {"api_name": "pyinsteon.data_types.user_data.UserData", "line_number": 81, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 84, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 85, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 86, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 87, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 88, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 89, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 90, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 91, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 92, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 93, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.TopicItem", "line_number": 97, "usage_type": "call"}, {"api_name": "pyinsteon.data_types.user_data.UserData", "line_number": 109, "usage_type": "call"}, {"api_name": "utils.TopicItem", "line_number": 110, "usage_type": "call"}, {"api_name": "utils.TopicItem", "line_number": 113, "usage_type": "call"}, {"api_name": "pyinsteon.data_types.user_data.UserData", "line_number": 124, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 129, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 130, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 131, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 132, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 133, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 134, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 135, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 136, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 137, "usage_type": "call"}, {"api_name": "utils.TopicItem", "line_number": 140, "usage_type": "call"}, {"api_name": "utils.send_topics", "line_number": 152, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 163, "usage_type": "call"}, {"api_name": "utils.async_case", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "341504575", "text": "import cv2\nimport numpy as np\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn import preprocessing\nfrom 
helperFunctions import loadDataFromVideo,loadDataFromBinary,saveData\nfrom networkFunctions import createNetwork\nfrom scipy.interpolate import interp1d\n\nX,Y = loadDataFromBinary()\n#scalerX = preprocessing.MinMaxScaler(feature_range=(-3,3))\n#scalerY = preprocessing.MinMaxScaler(feature_range=(-10,10))\nfrom sklearn.externals import joblib\n# joblib.dump(scalerX, 'scalerX.pkl')\n# joblib.dump(scalerY, 'scalerY.pkl')\n\nscalerX = joblib.load('scalerX.pkl')\nscalerY = joblib.load('scalerY.pkl')\nX_transformed = scalerX.transform(X)  # the loaded scalers are already fitted, so only transform here\nY = scalerY.transform(Y)\n\n\n\nwith tf.Session() as sess:\n    optimizer, cost, pred, x, y_, keep_prob, new_saver = createNetwork()\n    ckpt = tf.train.get_checkpoint_state('/home/iftimie/PycharmProjects/Autonomous-RCVehiclePython/AutonomousRpi/')\n    if ckpt and ckpt.model_checkpoint_path:\n        new_saver.restore(sess, ckpt.model_checkpoint_path)\n\n\n    Y_pred = sess.run(pred, feed_dict={x: X_transformed, keep_prob: 1.0})\n    mapper = interp1d([-10,10],[640,0])\n    Y_pred = mapper(Y_pred)\n    Y = mapper(Y)\n    for i in range(len(Y)):\n        xcheck = cv2.resize(X[i].reshape((64, 128)), (640, 480 - 230))\n        xcheck = cv2.cvtColor(xcheck,cv2.COLOR_GRAY2BGR)\n        xcheck = cv2.circle(xcheck,(int(Y[i]),50),10,[255,0,0],9)\n        xcheck = cv2.circle(xcheck,(int(Y_pred[i]),60),10,[0,0,255],9)\n        cv2.imshow(\"out\",xcheck)\n        cv2.waitKey(22)\n\n\n\n#\n", "sub_path": "Autonomous-RCVehiclePython/AutonomousRpi/testNet.py", "file_name": "testNet.py", "file_ext": "py", "file_size_in_byte": 1511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "helperFunctions.loadDataFromBinary", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 17, "usage_type": "name"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 18, "usage_type": "name"}, {"api_name": "tensorflow.Session", "line_number": 24, "usage_type": "call"}, {"api_name": "networkFunctions.createNetwork", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.train.get_checkpoint_state", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 26, "usage_type": "attribute"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2BGR", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "107100300", "text": "from django.http import HttpResponse\nfrom base.models import Owner\nfrom django.shortcuts import render\nfrom django.template import loader\n# Create your views here.\n\ndef index(request):\n\towner_list = Owner.objects.all()\n\ttemplate = loader.get_template('ownerside/index.html')\n\tcontext = {\n\t\t'owner_list':owner_list,\n\t}\n\treturn render(request, 'ownerside/index.html', context)\n\ndef detail(request, param1):\n\towner = Owner.objects.get(id=param1)\n\ttemplate = loader.get_template('ownerside/detail.html')\n\tcontext = 
{\n\t\t'owner':owner,\n\t}\n\n\treturn render(request, 'ownerside/detail.html', context)\n\n# Create your views here.\n", "sub_path": "ownerside/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "base.models.Owner.objects.all", "line_number": 8, "usage_type": "call"}, {"api_name": "base.models.Owner.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "base.models.Owner", "line_number": 8, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 9, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 9, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 13, "usage_type": "call"}, {"api_name": "base.models.Owner.objects.get", "line_number": 16, "usage_type": "call"}, {"api_name": "base.models.Owner.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "base.models.Owner", "line_number": 16, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 17, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "24348646", "text": "import midi\nfrom openpyxl import Workbook\nimport os\n#key_names = {'C':12,'C#':13,'D':14,'D#':15,'E':16,'F':17,'F#':18,'G':19,'G#':20,'A':21,'A#':22,'B':23}\nkey_names = {12:'C',13:'C#',14:'D',15:'D#',16:'E',17:'F',18:'F#',19:'G',20:'G#',21:'A',22:'A#',23:'B',24:'C'}\n\nwb = Workbook()\nwb.create_sheet(index=0,title=\"Test Files MIDI\")\nws = wb.active\nws['A1'] = 'Test Files'\nws['B1'] = 'No. 
Of Keys'\nws['C1'] = 'Expected Keys'\nws['D1'] = 'Expected Loudness Of Notes'\n\n\nfiles = os.listdir(\"test_cases/all\")\nnotes = []\nlength = []\nloudness = []\ncount = 0\nfor i in files:\n note = \"\"\n loud = \"\"\n if i.endswith(\".mid\"):\n pattern = midi.read_midifile(\"test_cases/all/\"+i)\n\n for i in range(0,len(pattern[0])-1,4):\n octave = pattern[0][i].pitch//12 - 1\n key = pattern[0][i].pitch - 12*octave;\n loud = loud + key_names[key] + \" : \" + str(pattern[0][i].velocity) + \", \"\n note = note + key_names[key] + \" \"\n\n notes.append(note)\n loudness.append(loud)\n length.append(len(pattern[0])//4)\n count += 1\n\n\n\nfor k in range(count):\n ws.cell(row=k+2, column=1).value = str(files[k])\n ws.cell(row=k+2, column=2).value = str(length[k])\n ws.cell(row=k+2, column=3).value = str(notes[k])\n ws.cell(row=k+2, column=4).value = str(loudness[k])\nwb.save(\"Test_Case_All_1.xlsx\")\n", "sub_path": "main/all_test.py", "file_name": "all_test.py", "file_ext": "py", "file_size_in_byte": 1352, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "openpyxl.Workbook", "line_number": 7, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "midi.read_midifile", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "578515794", "text": "import s01.colab_utils as u\nimport cv2\n\nyoutube_file = 'video.avi'\nu.download_youtube_video('JZdqjtWsL0U', '\"best[height=360]\"', youtube_file)\n\nvideo = cv2.VideoCapture(youtube_file)\nwidth = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\nframes_per_second = video.get(cv2.CAP_PROP_FPS)\nnum_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n\noutput_file = cv2.VideoWriter(\n filename='video-output.avi',\n # some installation of opencv may not support x264 (due to its license),\n # you can try other format (e.g. 
MPEG, XVID, or x264)\n    fourcc=cv2.VideoWriter_fourcc(*\"XVID\"),\n    fps=frames_per_second,\n    frameSize=(width, height),\n    isColor=True,\n)\n\ndetectron2 = 'Detectron2 bounding boxes'\nsift_ransac = 'After SIFT and RANSAC postprocessing'\n\n\ndef put_text(lbl, bottomLeftCornerOfText, fontColor=(255, 255, 255)):\n    fontScale = 1\n\n    lineType = 2\n    cv2.putText(frame, lbl,\n                bottomLeftCornerOfText,\n                cv2.FONT_HERSHEY_SIMPLEX,\n                fontScale,\n                fontColor,\n                lineType)\n\n\nvert_displ = 30\ncolor = (200, 200, 200)\nfor idx, frame in enumerate(u.video_frames(youtube_file, 0, None, apply_COLOR_RGB2BGR=False)):\n    # put_text(f'{idx}'.rjust(4, '0'), (width - 90, height - 10))\n    put_text(detectron2, (5, vert_displ), fontColor=color)\n    put_text(sift_ransac, (5, height // 2 + vert_displ), fontColor=color)\n    output_file.write(frame)\n\noutput_file.release()\n", "sub_path": "s01/colab_utils/write-video-with-frame-index.py", "file_name": "write-video-with-frame-index.py", "file_ext": "py", "file_size_in_byte": 1487, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "s01.colab_utils.download_youtube_video", "line_number": 5, "usage_type": "call"}, {"api_name": "s01.colab_utils", "line_number": 5, "usage_type": "name"}, {"api_name": "cv2.VideoCapture", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 33, "usage_type": "attribute"}, {"api_name": "s01.colab_utils.video_frames", "line_number": 41, "usage_type": "call"}, {"api_name": "s01.colab_utils", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "481246304", "text": "# coding=utf-8\n# author:MagiRui\n\nimport numpy as np\nimport pandas as pd\nfrom sklearntest import tree\nfrom PIL import Image\nfrom sklearntest.externals.six import StringIO\nimport pydotplus\n\ninput_file = \"/Users/magirui/machinelearning/decisiontree02/data/PastHires.csv\"\ndf = pd.read_csv(input_file, header=0)\nprint(df.head())\n\nd = {'Y':1, 'N':0}\ndf['Hired'] = df['Hired'].map(d)\ndf['Employed?'] = df['Employed?'].map(d)\ndf['Top-tier school'] = df['Top-tier school'].map(d)\ndf['Interned'] = df['Interned'].map(d)\n\nd = {'BS': 0, 'MS': 1, 'PhD': 2}\ndf['Level of Education'] = df['Level of Education'].map(d)\nprint(df.head())\n\nfeatures = list(df.columns[:6])\nprint(features)\n\ny = df[\"Hired\"]\nX = df[features]\n\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X, y)\n\ndot_data = StringIO()\ntree.export_graphviz(clf, out_file=dot_data,\n                     feature_names=features)\ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue())\ngraph.write_pdf('iris.pdf')", "sub_path": "decisiontree02/DecisionTree.py", "file_name": "DecisionTree.py", "file_ext": "py", "file_size_in_byte": 959, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": 
"sklearntest.tree.DecisionTreeClassifier", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearntest.tree", "line_number": 31, "usage_type": "name"}, {"api_name": "sklearntest.externals.six.StringIO", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearntest.tree.export_graphviz", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearntest.tree", "line_number": 35, "usage_type": "name"}, {"api_name": "pydotplus.graph_from_dot_data", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "300985416", "text": "import math\nfrom datetime import datetime, timedelta\nimport random\n\nimport jwt\nfrom api import app, TWOFACTORAUTHENTICATION\nfrom api.models.UserModel import User\n\n\ndef getUser(email):\n return User.query.filter_by(email=email).first()\n\n\ndef generateToken(email, type, otp=0, expiry=30):\n payload = {\n \"email\": email,\n \"type\": type,\n \"exp\": datetime.utcnow() + timedelta(minutes=expiry)\n }\n\n if type == TWOFACTORAUTHENTICATION:\n payload[\"otp\"] = otp\n\n token = jwt.encode(\n payload\n ,\n app.config['SECRET_KEY'],\n algorithm='HS256'\n )\n return token\n\n\ndef decodeToken(token):\n return jwt.decode(token, app.config['SECRET_KEY'], algorithms=[\"HS256\"])\n\n\ndef validateToken(token, tokenType):\n data = decodeToken(token)\n user = getUser(data[\"email\"])\n type = data[\"type\"]\n\n if user is None or type != tokenType:\n return False, user\n else:\n return True, user\n\n\ndef generateOTP():\n digits = \"0123456789\"\n OTP = \"\"\n\n for i in range(6):\n OTP += digits[math.floor(random.random() * 10)]\n\n return OTP\n", "sub_path": "api/services/TokenServices.py", "file_name": "TokenServices.py", "file_ext": "py", "file_size_in_byte": 1114, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "api.models.UserModel.User.query.filter_by", "line_number": 11, "usage_type": "call"}, {"api_name": "api.models.UserModel.User.query", "line_number": 11, "usage_type": "attribute"}, {"api_name": "api.models.UserModel.User", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 18, "usage_type": "call"}, {"api_name": "api.TWOFACTORAUTHENTICATION", "line_number": 21, "usage_type": "name"}, {"api_name": "jwt.encode", "line_number": 24, "usage_type": "call"}, {"api_name": "api.app.config", "line_number": 27, "usage_type": "attribute"}, {"api_name": "api.app", "line_number": 27, "usage_type": "name"}, {"api_name": "jwt.decode", "line_number": 34, "usage_type": "call"}, {"api_name": "api.app.config", "line_number": 34, "usage_type": "attribute"}, {"api_name": "api.app", "line_number": 34, "usage_type": "name"}, {"api_name": "math.floor", "line_number": 53, "usage_type": "call"}, {"api_name": "random.random", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "496913039", "text": "import numpy as np\nfrom scipy.constants import c as C\nfrom scipy.interpolate import interp1d as interp\nfrom scipy.integrate import simps as simpson\nimport matplotlib.pyplot as plt\nimport femtoQ.tools as fq\n\n\n\ndef get_FWHM(t,I_t):\n \n tmp = np.linspace(t[0],t[-1],1000000)\n \n I_tmp = interp(t,I_t,'quadratic')\n I_tmp = I_tmp(tmp)\n \n I_tmp = I_tmp - np.min(I_tmp)\n I_tmp /= np.max(I_tmp)\n\n t_1 = tmp[I_tmp>=0.5][0]\n t_2 = tmp[I_tmp>=0.5][-1]\n\n return t_2-t_1\n\ndef 
unpack_data(filename,wavelengthLimits):\n    \n    data = np.load(filename)\n    \n    delays = data['dispersion'] # Delays is actually dispersion window thickness\n    wavelengthsSpectro = data['wavelengths']*1e-9 # Kept the same for simplicity\n    trace = data['data']\n    trace/= np.max(trace)\n    \n    trace = trace[:,((wavelengthsSpectro>wavelengthLimits[0])&(wavelengthsSpectro<wavelengthLimits[1]))]\n    wavelengthsSpectro = wavelengthsSpectro[((wavelengthsSpectro>wavelengthLimits[0])&(wavelengthsSpectro 0.01\n    \n    plt.figure()\n    axL = plt.gca()\n    axR = axL.twinx()\n    axL.plot(wavelengths[II]*1e9,np.abs(initialGuess[II])**2,'r',linewidth = 3,label = 'Initial guess')\n    axL.plot(wavelengths[II]*1e9,np.abs(pulseRetrieved[II])**2 / np.max(np.abs(pulseRetrieved[II])**2),'k',linewidth = 3,label = 'Retrieved spectrum')\n    axL.set_ylabel('Normalized power density')\n    axL.set_xlabel('Wavelengths [nm]')\n    axR.set_ylabel('Spectral phase (x $\\pi$) [rad]')\n    \n    retrievedPhase = np.unwrap(np.angle(pulseRetrieved))\n    retrievedPhase -= np.average(retrievedPhase[II],weights = np.abs(pulseRetrieved[II])**2)\n    \n    axR.plot(wavelengths[II][IIplotphase]*1e9,retrievedPhase[II][IIplotphase]/np.pi,'--k')\n    axR.set_ylim(retrievedPhase[II][IIplotphase].min()-np.abs(retrievedPhase[II][IIplotphase].min()*0.1)/np.pi,retrievedPhase[II][IIplotphase].max()*1.1/np.pi)\n    axR.set_ylim(-10,10)\n    plt.xlim(wavelengthsSpectro[0]*1.8e9,wavelengthsSpectro[-1]*2.2e9)\n    \n    \n    plt.figure()\n    plt.pcolormesh((C/traceFrequencies)*1e9,delays,traceRetrieved)\n    plt.title('Retrieved trace')\n    plt.xlabel('Wavelengths [nm]')\n    plt.ylabel('Dispersion added [mm of Sapphire]')\n    plt.xlim(wavelengthsSpectro[0]*1e9,wavelengthsSpectro[-1]*1e9)\n    plt.colorbar()\n\n    return axL\n\n\ndef freq2time(frequency, spectrum):\n    # Interpolate over new frequency grid, including negative w components\n    v_max = frequency[-1]\n    v_min = -v_max\n    N = len(spectrum)\n    dv = v_max/N\n    \n    new_v = np.hstack((np.array([0]),np.linspace(dv,v_max,N),np.linspace(v_min,-dv,N)))\n    \n    interp_marginal = interp(frequency,spectrum,'quadratic',bounds_error=False,fill_value=0)\n    spectrum = interp_marginal(new_v)\n\n    E = np.fft.fftshift(np.fft.ifft(spectrum))\n    t = np.fft.fftshift(np.fft.fftfreq(2*N+1,dv))\n\n    return t, E\n\ndef RANA(delays,w_shg,trace_w,w_fund):\n    \n    a = 0.09\n    b = 0.425\n    c = 0.1\n    \n    # Integrate over delay axis\n    marginal_w = simpson(trace_w,delays,axis = 0)\n    \n    marginal_w = fq.ezsmooth(marginal_w,11,'hanning')\n    \n    marginal_w[marginal_w<0] = 0\n    \n    # Interpolate over new frequency grid, including negative w components\n    w_max = w_shg[-1]*2\n    w_min = -w_max\n    N = len(marginal_w)*2\n    dw = w_max/N\n    \n    new_w = np.hstack((np.array([0]),np.linspace(dw,w_max,N),np.linspace(w_min,-dw,N)))\n    \n    interp_marginal = interp(w_shg,marginal_w,'quadratic',bounds_error=False,fill_value=0)\n    marginal_w = interp_marginal(new_w)\n\n    S = np.fft.fftshift(np.fft.ifft(marginal_w))\n    t = np.fft.fftshift(np.fft.fftfreq(2*N+1,dw/2/np.pi))\n\n    s_p = np.sqrt(S)\n    s_m = -np.sqrt(S)\n    \n    s = np.zeros_like(s_p)\n    for ii,tau in enumerate(t):\n        \n        if ii==0:\n            s[ii] = s_m[ii]\n            continue\n        if ii==1:\n            ds0_p = abs(s_p[ii] - s[ii-1])\n            ds0_m = abs(s_m[ii] - s[ii-1])\n            \n            ds1_p = 0\n            ds1_m = 0\n            \n            ds2_p = 0\n            ds2_m = 0\n            \n            \n        elif ii==2:\n            ds0_p = abs(s_p[ii] - s[ii-1])\n            ds0_m = abs(s_m[ii] - s[ii-1])\n            \n            ds1_p = abs( ds0_p - (s[ii-1] - s[ii-2]) )\n            ds1_m = abs( ds0_m - (s[ii-1] - s[ii-2]) )\n            \n            ds2_p = 0\n            ds2_m = 0\n            \n        else:\n            ds0_p = abs(s_p[ii] - s[ii-1])\n            ds0_m = abs(s_m[ii] - s[ii-1])\n            \n            ds1_p = abs( ds0_p - (s[ii-1] - s[ii-2]) )\n            ds1_m = abs( ds0_m - (s[ii-1] - s[ii-2]) )\n            \n            ds2_p = abs( ds1_p - ( (s[ii-1] - s[ii-2]) - (s[ii-2] - s[ii-3]) ) )\n            ds2_m = abs( ds1_m - ( (s[ii-1] - s[ii-2]) - (s[ii-2] - s[ii-3]) ) )\n        \n        e_p = a*ds0_p**2 + b*ds1_p**2 + c*ds2_p**2\n        e_m = a*ds0_m**2 + b*ds1_m**2 + c*ds2_m**2\n        \n        \n        if e_p < e_m:\n            s[ii] = s_p[ii]\n        else:\n            s[ii] = s_m[ii]\n    \n    spectrum = np.fft.fftshift(np.fft.fft(s*np.hanning(len(s))))\n    v = np.fft.fftshift((np.fft.fftfreq(2*N+1,t[1]-t[0])))\n    spectrum = np.abs(spectrum)**0.5\n    spectrum_interp = interp(v*2*np.pi,spectrum,'quadratic',bounds_error=False,fill_value=0)\n    spectrum = spectrum_interp(w_fund)\n    spectrum -= np.min(spectrum)\n    spectrum /= np.max(spectrum)\n    \n    \n    spectrum = np.complex128(spectrum) #* np.exp(1j*np.random.normal(0,np.pi/4,len(spectrum)))\n    return spectrum", "sub_path": "build/lib/femtoQ/pr_backend/dscanlib.py", "file_name": "dscanlib.py", "file_ext": "py", "file_size_in_byte": 6803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.linspace", "line_number": 12, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pcolormesh", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.xlabel", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "scipy.constants.c", "line_number": 74, "usage_type": "name"}, {"api_name": "numpy.argsort", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.unwrap", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.angle", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 92, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pcolormesh", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "scipy.constants.c", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 115, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 
120, "usage_type": "attribute"}, {"api_name": "numpy.fft.ifft", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftfreq", "line_number": 121, "usage_type": "call"}, {"api_name": "scipy.integrate.simps", "line_number": 132, "usage_type": "call"}, {"api_name": "femtoQ.tools.ezsmooth", "line_number": 134, "usage_type": "call"}, {"api_name": "femtoQ.tools", "line_number": 134, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 144, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.fft.ifft", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 150, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftfreq", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 150, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 201, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.hanning", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 202, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftfreq", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 203, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 204, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.complex128", "line_number": 210, "usage_type": "call"}]} +{"seq_id": "602612693", "text": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: Apache-2.0\n\"\"\"\n\nimport requests\n\n\ndef call_and_get_response(method: str, action: str, host: str, port: str, request_param_generator, use_ssl: bool,\n query='', extra_headers=None):\n if extra_headers is None:\n extra_headers = {}\n\n method = method.upper()\n protocol = 'https' if use_ssl else 'http'\n\n request_params = request_param_generator.generate_request_params(method=method, action=action, query=query,\n host=host, port=port, protocol=protocol,\n headers=extra_headers)\n headers = request_params['headers'] if request_params['headers'] is not None else {}\n\n if method == 'GET':\n res = requests.get(url=request_params['url'], params=request_params['params'], headers=headers)\n elif method == 'DELETE':\n res = requests.delete(url=request_params['url'], params=request_params['params'], headers=headers)\n elif method == 'POST':\n res = requests.post(url=request_params['url'], data=request_params['params'], headers=headers)\n else:\n raise NotImplementedError(f'Use of method {method} has not been implemented in call_and_get_response')\n\n res.raise_for_status()\n return res\n", "sub_path": "src/graph_notebook/request_param_generator/call_and_get_response.py", "file_name": "call_and_get_response.py", "file_ext": "py", "file_size_in_byte": 1396, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "421397346", "text": "# Magic methods (also known as dunder methods) are special methods defined inside a class\n# Characteristics:\n# 1. They are not called manually; they are invoked automatically at the right moment\n# 2. Their names all begin with __ and end with __\n# 3. The method names are predefined by the language and are invoked automatically at the appropriate time
\nimport time\n\n\nclass Person(object):\n def __init__(self, name, age):\n # Called automatically when an object is created\n print('__init__被调用了')\n self.name = name\n self.age = age\n\n def __del__(self):\n # Called automatically when the object is destroyed\n print('__del__方法被调用了')\n\n def __repr__(self):\n # return 'hello'\n return '姓名:{},年龄{}'.format(self.name, self.age)\n\n def __str__(self):\n return 'good'\n\n def __call__(self, *args, **kwargs):\n print('call方法被调用了')\n # args is a tuple\n # kwargs is a dict\n print('args={},kwargs={}'.format(args, kwargs))\n fn = kwargs['fn']\n return fn(args[0], args[1])\n\n\nP = Person('张三', 18)\n# Without any customization, printing an object directly shows its type and memory address\nprint(P) # object type and memory address <__main__.Person object at 0x000002335F83DB08>\n# When an object is printed, its __str__ or __repr__ method is called\n# If both are defined, __str__ is preferred\n# del P\n# time.sleep(10)\nprint(repr(P)) # calling the built-in repr triggers the object's __repr__ method\nprint(P.__repr__()) # calling a magic method by hand; normally this is not done manually\n# P() # object name ==> invokes the object's __call__ method; raises an error if __call__ is not defined\nn = P(1, 2, fn=lambda x, y: x + y) # call an object as if it were a function\nprint(n)\n# Compare the str and repr methods\nimport datetime\n\nx = datetime.datetime(2020, 2, 24, 16, 17, 45, 200)\nprint(x) # str method # more readable\nprint(repr(x)) # repr method\n", "sub_path": "11.面向对象/03.魔法方法.py", "file_name": "03.魔法方法.py", "file_ext": "py", "file_size_in_byte": 1904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "datetime.datetime", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "410797617", "text": "import httplib, subprocess\nimport json\nimport matplotlib.pyplot as plt\n\nc = httplib.HTTPConnection('192.168.56.101', 3000)\n\ndata = []\nfor i in range(1000):\n c.request('GET', '/', '{}')\n doc = c.getresponse().read()\n json_data = json.loads(json.loads(doc))\n data.append(json_data['difficulty'])\ndata.sort()\n\nplt.plot(data)\nplt.show()\nplt.savefig(\"graph\")\n", "sub_path": "testcode/graph-of-difficulty.py", "file_name": "graph-of-difficulty.py", "file_ext": "py", "file_size_in_byte": 366, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "httplib.HTTPConnection", "line_number": 5, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "14374487", "text": "from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cross_validation import KFold\nimport numpy as np\n\n\n#design matrix that is passed into random forest\ndef get_rf_design_matrix(voxels, data):\n ss = ssm.SceneSlicer('test_data.nii', 'scenes.csv')\n day_night, int_ext = ss.get_scene_slices()\n new_X = np.zeros((data.shape[-1], len(voxels)))\n for num in range(len(voxels)):\n new_X[:, num] = data[voxels[num]]\n return new_X, day_night\n\n\ndef rf_accuracy(X_train, y_train, X_test, y_test, est=1000, feat=10, depth=10):\n model = RandomForestClassifier(n_estimators=est,\n max_features=feat,\n max_depth=depth)\n model.fit(X_train, y_train)\n results = model.score(X_test, y_test)\n return results\n\n\n# Defaults split into 80/20\ndef cv_rf_accuracy(X, y, est=1000, feat=10, depth=10, 
num_folds=5):\n index_array = np.arange(X.shape[0])\n np.random.shuffle(index_array)\n X = X[index_array]\n y = y[index_array]\n kf = KFold(X.shape[0], n_folds=num_folds)\n avg_acc = 0\n for train, test in kf:\n avg_acc += rf_accuracy(X[train], y[train], X[test], y[test], est, feat,\n depth)\n return avg_acc / float(num_folds)\n", "sub_path": "code/stat159lambda/classification/rf.py", "file_name": "rf.py", "file_ext": "py", "file_size_in_byte": 1268, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.zeros", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sklearn.cross_validation.KFold", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "295322783", "text": "import numpy as np\nimport time\n\nimport torch\nimport torch.nn as nn\n\nfrom abc import ABC, abstractmethod\n\nfrom .resnet import resnet18, resnet34, resnet50, resnet101\nfrom .utils import save_checkpoint, load_checkpoint\n\n\nclass BaseExtractor(ABC):\n\n def __init__(self):\n pass\n\n def run_epoch(self, data_loader, opt=None, cuda=True, back=True):\n loss_list = list()\n\n if back:\n for batch_idx, (Input, label) in enumerate(data_loader):\n print(\"Processing batch {}\".format(batch_idx), end='\\r')\n\n if cuda:\n Input = Input.cuda()\n label = label.cuda()\n\n prediction = self.forward(Input)\n loss = self.loss(prediction, label)\n\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n loss_list.append(loss.item())\n else:\n with torch.no_grad():\n for batch_idx, (Input, label) in enumerate(data_loader):\n print(\"Processing batch {}\".format(batch_idx), end='\\r')\n\n if cuda:\n Input = Input.cuda()\n label = label.cuda()\n\n prediction = self.forward(Input)\n loss = self.loss(prediction, label)\n\n loss_list.append(loss.item())\n\n return np.mean(np.array(loss_list))\n\n def train(self,\n data_loader,\n batch_size,\n epoch,\n val_data_loader=None,\n cuda=True,\n lr=1e-4):\n opt_Adam = torch.optim.Adam(filter(lambda p: p.requires_grad,\n self.net.parameters()),\n lr=lr)\n\n train_loss_list = list()\n val_loss_list = list()\n\n not_improve = 0\n last_loss = 1e8\n\n if cuda:\n self.net.cuda()\n\n for e in range(epoch):\n s_time = time.time()\n\n train_loss = self.run_epoch(data_loader,\n opt_Adam,\n cuda=cuda,\n back=True)\n train_loss_list.append(train_loss)\n cur_loss = train_loss\n\n if val_data_loader is not None:\n val_loss = self.run_epoch(val_data_loader,\n cuda=cuda,\n back=False)\n val_loss_list.append(val_loss)\n cur_loss = val_loss\n\n e_time = time.time()\n print(\"Epoch {}.\\tTrain loss: {}\".format(e, train_loss), end='')\n if val_data_loader is not None:\n print(\"\\tVal loss: {}\".format(val_loss), end='')\n print(\"\\tTime: {}s\".format(e_time - s_time))\n\n if cur_loss < last_loss:\n not_improve = 0\n last_loss = cur_loss\n\n save_checkpoint(self.store_path, self.model_name + \".h5\", e,\n self.net.state_dict(), opt_Adam.state_dict(),\n train_loss_list, val_loss_list)\n else:\n not_improve += 1\n\n if not_improve >= 10:\n print(\"Early stop at epoch {}\".format(e))\n return\n\n @abstractmethod\n def loss(self, prediction, label):\n pass\n\n @abstractmethod\n def forward(self, Input):\n pass\n\n 
@abstractmethod\n def get_feature(self, Input):\n \"\"\"\n Return Numpy cpu array\n \"\"\"\n pass\n\n\nclass TimeContrastiveFeatureExtractor(BaseExtractor):\n\n def __init__(self, n_segments=3, res_layers=18, store_path=None, input_dim=1):\n\n self.model_name = 'time_contrastive'\n self.store_path = store_path\n\n self.n_segments = n_segments\n self.res_layers = res_layers\n self.input_dim = input_dim\n\n self.net = TimeContrastiveNeuralNetwork(n_segments, res_layers,\n input_dim)\n\n self.loss_func = nn.NLLLoss()\n\n def loss(self, prediction, label):\n return self.loss_func(prediction, label)\n\n def forward(self, Input):\n return self.net(Input)\n\n def get_feature(self, Input):\n with torch.no_grad():\n return self.net.get_feature(Input).cpu().numpy().reshape(-1)\n\n\nclass TimeContrastiveNeuralNetwork(nn.Module):\n\n def __init__(self, n_segments, res_layers, input_dim):\n super(TimeContrastiveNeuralNetwork, self).__init__()\n\n self.n_segments = n_segments\n self.res_layers = res_layers\n self.input_dim = input_dim\n\n if self.res_layers == 18:\n self.extractor = resnet18(input_dim)\n elif self.res_layers == 34:\n self.extractor = resnet34(input_dim)\n elif self.res_layers == 50:\n self.extractor = resnet50(input_dim)\n elif self.res_layers == 101:\n self.extractor = resnet101(input_dim)\n\n self.fc = nn.Sequential(*[\n nn.Flatten(),\n nn.Linear(85504, self.n_segments),\n nn.LogSoftmax(dim=-1),\n ])\n\n def forward(self, Input):\n x = self.extractor(Input)\n x = self.fc(x)\n return x\n \n def get_feature(self, Input):\n return self.extractor(Input)\n", "sub_path": "Feature/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 5373, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "abc.ABC", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 60, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.save_checkpoint", "line_number": 100, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 110, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 114, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 140, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 153, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 153, "usage_type": "name"}, {"api_name": "resnet.resnet18", "line_number": 163, "usage_type": "call"}, {"api_name": "resnet.resnet34", "line_number": 165, "usage_type": "call"}, {"api_name": "resnet.resnet50", "line_number": 167, "usage_type": "call"}, {"api_name": "resnet.resnet101", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 171, "usage_type": "name"}, {"api_name": "torch.nn.Flatten", "line_number": 172, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 173, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 174, "usage_type": "name"}]} +{"seq_id": "140487109", "text": "#Webscraping Tutorial from https://www.youtube.com/watch?v=XQgXKtPSzUI\n#will grab URL\nfrom urllib.request import urlopen as uReq\n#will parse html text\nfrom bs4 import BeautifulSoup as soup \n\nmy_url='https://www.newegg.com/Video-Cards-Video-Devices/Category/ID-38?Tpk=graphics%20card'\n# Opening up connection, grabbing the page\nuClient = uReq(my_url)\n#off loads the contnt into a variable\npage_html= uClient.read()\n#Close the content \nuClient.close()\n#html parsing \npage_soup = soup(page_html,\"html.parser\")\ncontainers=page_soup.findAll(\"div\",{\"class\":\"item-container\"})\ncontainer = containers[0]\ndivWithInfo=containers[0].find(\"div\",\"item-info\")\n#Finds the div with the title and image \nfor container in containers:\n brand = container.find(\"div\",\"item-info\").img[\"title\"]\n title_container = container.findAll(\"a\",{\"class\":\"item-title\"})\n product_name = title_container[0].text\n \n shipping_container = container.findAll(\"li\",{\"class\":\"price-ship\"})\n shipping = shipping_container[0].text.strip()\n \n print(\"brand: \"+ brand)\n print(\"product_name: :\"+product_name)\n print(\"shipping: \" + shipping)", "sub_path": "NewEggWebscraperTutorial.py", "file_name": "NewEggWebscraperTutorial.py", "file_ext": "py", "file_size_in_byte": 1122, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "urllib.request.urlopen", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "127589910", "text": "# coding=utf-8\n__author__ = 'Warlock'\n\nfrom flask.ext.assets import Environment, Bundle\n\n\ndef bundle(app):\n assets = Environment(app)\n\n assets.debug = True\n\n theme_bundle(assets)\n base_bundle(assets)\n app_bundle(assets)\n\n return assets\n\n\ndef base_bundle(assets):\n\n js_vendor = Bundle('components/jquery/dist/jquery.js',\n 'js/vendor/jquery-ui.js',\n 'js/vendor/bootstrap/bootstrap-toggle.min.js',\n 'js/vendor/labjs/LAB.src.js' if assets.debug else 'js/vendor/labjs/LAB.js',\n 'components/knockout/dist/knockout.debug.js',\n 'components/select2/dist/js/select2.full.js',\n 'components/lodash/lodash.js',\n 'components/toastr/toastr.js',\n 'js/knockout.ext.js',\n filters='jsmin', output='gen/vendor_pack.js')\n\n assets.register('js_vendor', js_vendor)\n\n css_vendor = Bundle('css/vendor/jquery/jquery-ui.css',\n 'css/vendor/jquery/jquery-ui.theme.css',\n 'css/vendor/bootstrap/bootstrap-toggle.min.css',\n 'components/toastr/toastr.css',\n 'components/select2/dist/css/select2.css',\n 'components/select2-bootstrap-theme/dist/select2-bootstrap.css',\n filters='cssmin', output='gen/vendor_pack.css')\n\n assets.register('css_vendor', css_vendor)\n\n\ndef theme_bundle(assets):\n js_theme = Bundle('js/theme/wordeater/bootstrap.js',\n 'js/theme/wordeater/todo.js',\n 'js/theme/wordeater/app.plugin.js',\n 'theme_components/calendar/calendar.min.js',\n output='gen/theme_packed.js')\n\n assets.register('js_theme', js_theme)\n\n css_theme = Bundle('css/themes/wordeater/bootstrap.css',\n 'css/themes/wordeater/animate.css',\n 
'css/themes/wordeater/font-awesome.min.css',\n 'css/themes/wordeater/font.css',\n 'css/themes/wordeater/plugin.css',\n 'css/themes/wordeater/todo.css',\n 'theme_components/calendar/calendar.css',\n filters='cssmin', output='gen/theme.css')\n\n assets.register('css_theme', css_theme)\n\n\ndef app_bundle(assets):\n js_app = Bundle('js/utils.js',\n 'js/rest.js',\n 'js/api.js',\n 'js/app.js',\n filters='jsmin', output='gen/app.js')\n\n assets.register('js_app', js_app)\n\n", "sub_path": "we-web/app/bundle.py", "file_name": "bundle.py", "file_ext": "py", "file_size_in_byte": 2644, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "flask.ext.assets.Environment", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.ext.assets.Bundle", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "91842488", "text": "import os\nimport math\nimport argparse\n\nfrom tensorflow.python import keras\n\nfrom randnet.data.loader import DataLoader\nfrom randnet.data.mapper import DataSetMapper\nfrom randnet.model.randnet import RandNetSmall\n\n\ndef half_cosine_lr_schedule(epoch, total_n_epochs=100, initial_lr=0.1):\n x = (epoch / float(total_n_epochs)) * math.pi\n return initial_lr * 0.5 * (math.cos(x) + 1)\n\n\nclass TensorboardCallbackWithLR(keras.callbacks.TensorBoard):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def on_epoch_end(self, epoch, logs=None):\n logs.update({'lr': keras.backend.eval(self.model.optimizer.lr)})\n super().on_epoch_end(epoch, logs)\n\n\ndef train(experiment_dir=\"experiment\",\n dataset=\"cifar10\",\n batch_size=32,\n epochs=100,\n l2=0.0001,\n initial_lr=0.0004):\n data_set_mapper = DataSetMapper(dataset)\n\n data_loader = DataLoader(data_set_mapper.name,\n batch_size,\n train_split=data_set_mapper.train_split,\n val_split=data_set_mapper.val_split)\n\n regularizer = keras.regularizers.l2(l2)\n model = RandNetSmall(data_loader.num_classes, kernel_regularizer=regularizer, bias_regularizer=regularizer)\n optimizer = keras.optimizers.Adam(initial_lr)\n\n model.compile(\n optimizer=optimizer,\n loss='categorical_crossentropy',\n metrics=[keras.metrics.categorical_accuracy,\n keras.metrics.top_k_categorical_accuracy]\n )\n\n # model.build(data_loader.shape)\n # model.summary()\n\n log_dir = os.path.join(experiment_dir, \"logs\")\n tensorboard_callback = TensorboardCallbackWithLR(log_dir=log_dir, write_images=True)\n\n train_iterator = data_loader.train_one_shot_iterator\n val_iterator = data_loader.val_one_shot_iterator\n\n learning_rate_scheduler = keras.callbacks.LearningRateScheduler(\n lambda x: half_cosine_lr_schedule(x, initial_lr=initial_lr, total_n_epochs=epochs),\n verbose=1)\n\n filepath = os.path.join(experiment_dir, \"best_model_weights_checkpoint\")\n checkpoint_callback = keras.callbacks.ModelCheckpoint(filepath,\n monitor='val_acc',\n verbose=1,\n save_best_only=True,\n mode='max',\n save_weights_only=True)\n\n model.fit(train_iterator,\n steps_per_epoch=data_loader.train_steps_per_epoch,\n epochs=epochs,\n batch_size=batch_size,\n validation_data=val_iterator,\n validation_steps=data_loader.val_steps_per_epoch,\n callbacks=[tensorboard_callback, 
learning_rate_scheduler, checkpoint_callback])\n\n weight_path = os.path.join(experiment_dir, \"weights/model_weights\")\n model.save_weights(weight_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Keras Randomly Wired Networks Training\")\n parser.add_argument(\"--experiment-dir\", default=\"experiment\", type=str)\n parser.add_argument(\"--dataset\", default=\"cifar10\", choices=list(DataSetMapper.VAL_SPLIT_MAPPING.keys()), type=str)\n parser.add_argument(\"--epochs\", default=100, type=int)\n parser.add_argument(\"--l2\", default=0.0001, type=float)\n parser.add_argument(\"--batch-size\", default=32, type=int)\n parser.add_argument(\"--initial-lr\", default=0.1, type=float)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n train(\n experiment_dir=args.experiment_dir,\n dataset=args.dataset,\n epochs=args.epochs,\n l2=args.l2,\n batch_size=args.batch_size,\n initial_lr=args.initial_lr\n )\n", "sub_path": "randnet/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3872, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "math.pi", "line_number": 13, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.callbacks", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras", "line_number": 17, "usage_type": "name"}, {"api_name": "tensorflow.python.keras.backend.eval", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.backend", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras", "line_number": 22, "usage_type": "name"}, {"api_name": "randnet.data.mapper.DataSetMapper", "line_number": 32, "usage_type": "call"}, {"api_name": "randnet.data.loader.DataLoader", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.regularizers.l2", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.regularizers", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras", "line_number": 39, "usage_type": "name"}, {"api_name": "randnet.model.randnet.RandNetSmall", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.optimizers.Adam", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.optimizers", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras", "line_number": 41, "usage_type": "name"}, {"api_name": "tensorflow.python.keras.metrics", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras", "line_number": 46, "usage_type": "name"}, {"api_name": "tensorflow.python.keras.metrics", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras", "line_number": 47, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras.callbacks.LearningRateScheduler", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.callbacks", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras", "line_number": 59, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": 
"tensorflow.python.keras.callbacks.ModelCheckpoint", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.callbacks", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras", "line_number": 64, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 84, "usage_type": "call"}, {"api_name": "randnet.data.mapper.DataSetMapper.VAL_SPLIT_MAPPING.keys", "line_number": 86, "usage_type": "call"}, {"api_name": "randnet.data.mapper.DataSetMapper.VAL_SPLIT_MAPPING", "line_number": 86, "usage_type": "attribute"}, {"api_name": "randnet.data.mapper.DataSetMapper", "line_number": 86, "usage_type": "name"}]} +{"seq_id": "502845277", "text": "from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom .models import User, Chat, Message\nfrom .serializers import UserSerializer, ChatSerializer, MessageSerializer\nfrom rest_framework import status\nfrom rest_framework.views import APIView\n\nclass APIUser(APIView):\n def post(self, request):\n seriailzer = UserSerializer(data=request.data)\n if seriailzer.is_valid():\n if 'username' in request.data:\n try:\n seriailzer.save()\n except:\n return Response(seriailzer.errors, \n status.HTTP_400_BAD_REQUEST)\n else:\n return Response({'id':seriailzer.data['pk']}, \n status.HTTP_201_CREATED)\n return Response(seriailzer.errors, \n status.HTTP_400_BAD_REQUEST)\n\nclass APIChat(APIView):\n def post(self, request):\n seriailzer = ChatSerializer(data=request.data)\n if seriailzer.is_valid():\n if 'name' and 'users' in request.data:\n try:\n seriailzer.save()\n except:\n return Response(seriailzer.errors, \n status.HTTP_400_BAD_REQUEST)\n else:\n return Response({'id':seriailzer.data['pk']}, \n status.HTTP_201_CREATED)\n return Response(seriailzer.errors, \n status.HTTP_400_BAD_REQUEST)\n\nclass APIMessage(APIView):\n def post(self, request):\n seriailzer = MessageSerializer(data=request.data)\n if seriailzer.is_valid():\n if 'chat' and 'author' and 'text' in request.data:\n try:\n seriailzer.save()\n except:\n return Response(seriailzer.errors, \n status.HTTP_400_BAD_REQUEST)\n else:\n print(type(seriailzer.data))\n return Response({'id':seriailzer.data['pk']}, \n status.HTTP_201_CREATED)\n return Response(seriailzer.errors, \n status.HTTP_400_BAD_REQUEST)\n\nclass APIGetChat(APIView):\n def post(self, request):\n if type(request.data) != int:\n if 'user' in request.data:\n chats = Chat.objects.filter(users=request.data['user'])\n print(request.data)\n seriailzer = ChatSerializer(chats.order_by('-created_at'), many=True)\n return Response(seriailzer.data, \n status.HTTP_201_CREATED)\n return Response('400 Bad Request')\n\nclass APIGetMessage(APIView):\n def post(self, request):\n if type(request.data) != int:\n if 'chat' in request.data:\n messages = Message.objects.filter(chat=request.data['chat'])\n print(request.data)\n seriailzer = MessageSerializer(messages.order_by('-created_at'), many=True)\n return Response(seriailzer.data, \n status.HTTP_201_CREATED)\n return Response('400 Bad Request')\n\n\n# Create your views here.\n", "sub_path": "api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3227, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 8, "usage_type": "name"}, 
{"api_name": "serializers.UserSerializer", "line_number": 10, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 17, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 17, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 20, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 22, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 24, "usage_type": "name"}, {"api_name": "serializers.ChatSerializer", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 32, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 33, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 33, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 36, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 36, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 37, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 38, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 40, "usage_type": "name"}, {"api_name": "serializers.MessageSerializer", "line_number": 42, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 48, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 49, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 49, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 52, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 53, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 54, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 55, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 55, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 57, "usage_type": "name"}, {"api_name": "models.Chat.objects.filter", "line_number": 61, "usage_type": "call"}, {"api_name": "models.Chat.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.Chat", "line_number": 61, "usage_type": "name"}, {"api_name": "serializers.ChatSerializer", "line_number": 63, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 64, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 65, "usage_type": "attribute"}, {"api_name": "rest_framework.status", 
"line_number": 65, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 66, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 68, "usage_type": "name"}, {"api_name": "models.Message.objects.filter", "line_number": 72, "usage_type": "call"}, {"api_name": "models.Message.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "models.Message", "line_number": 72, "usage_type": "name"}, {"api_name": "serializers.MessageSerializer", "line_number": 74, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 75, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 76, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 76, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "534721646", "text": "\nfrom requests_html import HTMLSession\nfrom bs4 import BeautifulSoup\nimport random\nimport telebot\nbot=telebot.TeleBot(token)\n\nsession=HTMLSession()\n\ndef presents(html):\n k=1\n m=1\n gifts=dict()\n while m<5:\n page={'page':m}\n resp=session.get(html, params=page)\n soup=BeautifulSoup(resp.html.html, 'html.parser')\n table=soup.find_all('div', class_=\"product-intro\")\n for element in table:\n gifts[k]={'title': element.find('h3', class_=\"product-intro__title\").text.replace('\\n', \"\").strip(), \\\n \"photo\": element.find('div', class_=\"product-intro__image-box\").find_next('img').get('data-src'),\\\n 'price': element.find('div', class_=\"product-intro__price\").text.replace('\\n', \"\").replace('\\t',\\\n \"\").replace(' ', \"\").strip(),\\\n \"url\":element.find('div', class_=\"product-intro__image-box\").find_next('a').get('href')}\n k+=1\n m+=1\n return gifts\n\n@bot.message_handler(commands=['start', 'help'])\ndef send_welcome(message):\n button=telebot.types.ReplyKeyboardMarkup(True)\n button.row(\"Стильна кухня\", 'Затишний дім')\n button.row(\"Аксесуари\", \"Солодощі\")\n button.row(\"Креативні подарунки\")\n bot.send_message(message.chat.id, \"Ласкаво просимо в Gift Ideas! 
Щоб переглядати ідеї \"+\\\n \"подарунків оберіть категорію в якій бажаєте обрати\"+\\\n \" подарунок!\", reply_markup=button)\n\ndef send(message,url):\n num=random.randint(1,len(presents(url)))\n bot.send_photo(message.chat.id, presents(url)[num][\"photo\"], caption=presents(url)[num]['title']+\\\n \"\\n\"+\"\"+presents(url)[num]['price']+\"\"+\"\\n\"+\\\n 'Можна придбати в магазині \"Хочу вже\"(м.Чернівці, вул.Руська, 10)'+\\\n \" або на сайті за посиланням: \"+ presents(url)[num]['url'], parse_mode=\"HTML\")\n\n@bot.message_handler(content_types='text')\ndef button_type(message):\n if message.text==\"Стильна кухня\":\n url=\"https://hochuvzhe.ua/ua/catalog/5-stilnaya-kukhnya\"\n send(message,url)\n elif message.text=='Затишний дім':\n url=\"https://hochuvzhe.ua/ua/catalog/4-uyutnyy-dom\"\n send(message,url)\n elif message.text=='Аксесуари':\n url=\"https://hochuvzhe.ua/ua/catalog/7-aksessuary\"\n send(message,url)\n elif message.text=='Солодощі':\n url=\"https://hochuvzhe.ua/ua/catalog/8-vkusnosti\"\n send(message,url)\n elif message.text=='Креативні подарунки':\n url=\"https://hochuvzhe.ua/ua/catalog/695-originalnye-podarki\"\n send(message,url)\n \n \n\n\nif __name__ == \"__main__\":\n bot.polling()\n\n", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 2955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "telebot.TeleBot", "line_number": 6, "usage_type": "call"}, {"api_name": "requests_html.HTMLSession", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 17, "usage_type": "call"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 31, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 31, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "446463535", "text": "import matplotlib.pyplot as plt\nimport pandas as pd\n\niris = pd.read_csv('bk_ebs_ai/3-2/Iris.csv')\n\n\n# Scatter plot of petal (Petal) length and width\nfig = iris[iris.Species == 'Iris-setosa'].plot.scatter(\n x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')\niris[iris.Species == 'Iris-versicolor'].plot.scatter(\n x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)\niris[iris.Species == 'Iris-virginica'].plot.scatter(\n x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)\nfig.set_xlabel(\"Petal Length\")\nfig.set_ylabel(\"Petal Width\")\nfig.set_title(\" Petal Length VS Width\")\nfig = plt.gcf()\nfig.set_size_inches(10, 6)\nplt.show()\n", "sub_path": "bk_ebs_ai_math/3-2/3-2-07.py", "file_name": "3-2-07.py", "file_ext": "py", "file_size_in_byte": 732, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "248275880", "text": "import json\nimport os\n\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views.decorators.http import require_http_methods\n\nfrom authentication import authenticator\nfrom script import script_controller\nfrom server.utils import 
HandleError\n\n\n@require_http_methods(['GET'])\n@authenticator.RequireAuth\n@HandleError\ndef ListScripts(request):\n try:\n return JsonResponse(\n {'scripts': script_controller.ScriptController().ListScripts()})\n except Exception as e:\n return JsonResponse({'error': str(e)})\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef CreateScript(request):\n reqJson = json.loads(request.body)\n name = reqJson['name']\n return JsonResponse(\n {'script': script_controller.ScriptController().CreateScript(name)})\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef GetScriptDraft(request):\n reqJson = json.loads(request.body)\n script_id = reqJson['script_id']\n return JsonResponse(\n {'draft': script_controller.ScriptController().GetScriptDraft(script_id)})\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef UpdateScriptDraft(request):\n reqJson = json.loads(request.body)\n script_id = reqJson['script_id']\n name = reqJson['name']\n content = reqJson['content']\n timeout_msec = reqJson['timeout_msec']\n ctrl = script_controller.ScriptController()\n ctrl.UpdateScript(script_id, name)\n return JsonResponse(\n {'draft': ctrl.UpdateScriptDraft(script_id, content, timeout_msec)})\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef SearchHistory(request):\n reqJson = json.loads(request.body)\n script_id = reqJson['script_id']\n node_id = reqJson['node_id']\n ctrl = script_controller.ScriptController()\n return JsonResponse(\n {'records': ctrl.SearchScriptExecRecord(script_id, node_id)})\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef ExecScript(request):\n reqJson = json.loads(request.body)\n script_id = reqJson['script_id']\n node_ids = reqJson['node_ids']\n ctrl = script_controller.ScriptController()\n return JsonResponse(ctrl.ExecScript(script_id, node_ids))\n\n\n@require_http_methods(['POST'])\n@authenticator.RequireAuth\n@HandleError\ndef ReExecScript(request):\n reqJson = json.loads(request.body)\n record_ids = reqJson['record_ids']\n ctrl = script_controller.ScriptController()\n ctrl.ReExecScript(record_ids)\n return JsonResponse({})", "sub_path": "script/apis.py", "file_name": "apis.py", "file_ext": "py", "file_size_in_byte": 2446, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.http.JsonResponse", "line_number": 17, "usage_type": "call"}, {"api_name": "script.script_controller.ScriptController", "line_number": 18, "usage_type": "call"}, {"api_name": "script.script_controller", "line_number": 18, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 20, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_http_methods", "line_number": 12, "usage_type": "call"}, {"api_name": "authentication.authenticator.RequireAuth", "line_number": 13, "usage_type": "attribute"}, {"api_name": "authentication.authenticator", "line_number": 13, "usage_type": "name"}, {"api_name": "server.utils.HandleError", "line_number": 14, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 27, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 29, "usage_type": "call"}, {"api_name": "script.script_controller.ScriptController", "line_number": 30, "usage_type": "call"}, {"api_name": "script.script_controller", "line_number": 30, "usage_type": "name"}, {"api_name": 
"django.views.decorators.http.require_http_methods", "line_number": 23, "usage_type": "call"}, {"api_name": "authentication.authenticator.RequireAuth", "line_number": 24, "usage_type": "attribute"}, {"api_name": "authentication.authenticator", "line_number": 24, "usage_type": "name"}, {"api_name": "server.utils.HandleError", "line_number": 25, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 37, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 39, "usage_type": "call"}, {"api_name": "script.script_controller.ScriptController", "line_number": 40, "usage_type": "call"}, {"api_name": "script.script_controller", "line_number": 40, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_http_methods", "line_number": 33, "usage_type": "call"}, {"api_name": "authentication.authenticator.RequireAuth", "line_number": 34, "usage_type": "attribute"}, {"api_name": "authentication.authenticator", "line_number": 34, "usage_type": "name"}, {"api_name": "server.utils.HandleError", "line_number": 35, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}, {"api_name": "script.script_controller.ScriptController", "line_number": 52, "usage_type": "call"}, {"api_name": "script.script_controller", "line_number": 52, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 54, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_http_methods", "line_number": 43, "usage_type": "call"}, {"api_name": "authentication.authenticator.RequireAuth", "line_number": 44, "usage_type": "attribute"}, {"api_name": "authentication.authenticator", "line_number": 44, "usage_type": "name"}, {"api_name": "server.utils.HandleError", "line_number": 45, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}, {"api_name": "script.script_controller.ScriptController", "line_number": 65, "usage_type": "call"}, {"api_name": "script.script_controller", "line_number": 65, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 66, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_http_methods", "line_number": 58, "usage_type": "call"}, {"api_name": "authentication.authenticator.RequireAuth", "line_number": 59, "usage_type": "attribute"}, {"api_name": "authentication.authenticator", "line_number": 59, "usage_type": "name"}, {"api_name": "server.utils.HandleError", "line_number": 60, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 74, "usage_type": "call"}, {"api_name": "script.script_controller.ScriptController", "line_number": 77, "usage_type": "call"}, {"api_name": "script.script_controller", "line_number": 77, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 78, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_http_methods", "line_number": 70, "usage_type": "call"}, {"api_name": "authentication.authenticator.RequireAuth", "line_number": 71, "usage_type": "attribute"}, {"api_name": "authentication.authenticator", "line_number": 71, "usage_type": "name"}, {"api_name": "server.utils.HandleError", "line_number": 72, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 85, "usage_type": "call"}, {"api_name": "script.script_controller.ScriptController", "line_number": 87, "usage_type": "call"}, {"api_name": "script.script_controller", "line_number": 87, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 89, 
"usage_type": "call"}, {"api_name": "django.views.decorators.http.require_http_methods", "line_number": 81, "usage_type": "call"}, {"api_name": "authentication.authenticator.RequireAuth", "line_number": 82, "usage_type": "attribute"}, {"api_name": "authentication.authenticator", "line_number": 82, "usage_type": "name"}, {"api_name": "server.utils.HandleError", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "229486181", "text": "import csv\nfrom django.shortcuts import render, HttpResponse,HttpResponseRedirect\nfrom .models import Assets\nfrom django.forms.models import model_to_dict\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\n# Create your views here.\n'''\ndef AssetsListView(request):\n if request.user.is_authenticated():\n objs = Assets.objects.all()\n return render(request, \"assets.html\", context={'objs': objs}) # context 必须是字典类型\n else:\n return HttpResponseRedirect(\"/account/login/\")\n'''\n@require_http_methods([\"GET\",])\n@login_required(login_url=\"/account/login/\")\ndef AssetsListView(request):\n search_value=request.GET.get('search_value')\n if search_value:\n objs=Assets.objects.filter(hostname=search_value)\n else:\n objs =Assets.objects.all()\n return render(request, \"assets.html\", context={'content': objs})\n\n@require_http_methods([\"DELETE\",])\ndef AssetsDeleteView(request,pk):\n Assets.objects.get(pk=pk).delete()\n return HttpResponse(\"Delete ok.\")\n\n\n@require_http_methods([\"POST\",])\ndef AssetsAddView(request):\n data=request.POST.dict()\n full_message=True\n if data['status']=='---请选择---':\n full_message = False\n for str in data.values():\n if str == '':\n full_message = False\n if full_message:\n Assets.objects.create(**data)\n return HttpResponse(\"Create ok\")\n else:\n return HttpResponse(\" Incomplete information\")\n\n\n@require_http_methods([\"GET\",])\n@login_required(login_url=\"/account/login/\")\ndef AssetsDetailView(request,pk):\n obj = Assets.objects.get(pk=pk)\n data = model_to_dict(obj)\n return JsonResponse(data)\n\n\ndef AssetsEditView(request):\n data = request.POST.dict()\n print(data)\n pk = data.pop(\"pk\", None)\n retdata = {}\n if not pk:\n retdata['code'] = -1\n retdata['msg'] = \"pk not found\"\n else:\n Assets.objects.filter(pk=pk).update(**data)\n\n retdata['code'] = 0\n retdata['msg'] = \"Edit ok.\"\n\n return JsonResponse(retdata)\n\n\ndef AssetsExportCsvView(request):\n # Create the HttpResponse object with the appropriate CSV header.\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"somefilename.csv\"'\n writer = csv.writer(response)\n writer.writerow(\n ['ID', 'hostname', 'cpu_num', 'cpu_model', 'mem_total', 'disk', 'public_ip', 'private_ip', 'remote_ip',\n 'status', 'os_system', 'service_line', 'frame', 'op', 'remark', 'create_time', 'update_time'])\n objs = Assets.objects.all()\n for obj in objs.values_list():\n writer.writerow(list(obj))\n return HttpResponse(\"Export ok\")", "sub_path": "lesson11/pangya/webapp/assets/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2777, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "models.Assets.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Assets.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.Assets", "line_number": 22, "usage_type": 
"name"}, {"api_name": "models.Assets.objects.all", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Assets.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.Assets", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_http_methods", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Assets.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Assets.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Assets", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 30, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_http_methods", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Assets.objects.create", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Assets.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Assets", "line_number": 43, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 46, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_http_methods", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Assets.objects.get", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Assets.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.Assets", "line_number": 52, "usage_type": "name"}, {"api_name": "django.forms.models.model_to_dict", "line_number": 53, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 54, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_http_methods", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 50, "usage_type": "call"}, {"api_name": "models.Assets.objects.filter", "line_number": 66, "usage_type": "call"}, {"api_name": "models.Assets.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "models.Assets", "line_number": 66, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 76, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Assets.objects.all", "line_number": 82, "usage_type": "call"}, {"api_name": "models.Assets.objects", "line_number": 82, "usage_type": "attribute"}, {"api_name": "models.Assets", "line_number": 82, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "308725081", "text": "import h5py\nimport os\nimport numpy as np\n\nwindows_size = 17\n\nh5py_Path = r\"F:\\DNA_RNA_deeplearning\\RNA_file\\RNA_hp5y\"\noutput_Path = r\"F:\\DNA_RNA_deeplearning\\RNA_file\\nx26_hp5y\"\nos.chdir(h5py_Path)\n\nprint(\"the pssm path:\",h5py_Path)\n\nh5py_Path_listdir = os.listdir(h5py_Path)\n\n\na = np.zeros((windows_size,26))\n\nfor h5py_file_name in h5py_Path_listdir:\n f = h5py.File(h5py_file_name, 'r')\n h5py_data = f['train']\n print(h5py_file_name)\n if h5py_data.shape[0]>(windows_size-1):\n data_x = 
np.zeros(((h5py_data.shape[0]-windows_size+1),windows_size,26))\n data_y = np.zeros(h5py_data.shape[0]-windows_size+1)\n\n counter = (windows_size-1)/2\n while(counter < (h5py_data.shape[0]-(windows_size-1)/2)):\n data_x[int(counter-(windows_size-1)/2),:,:] = h5py_data[int(counter-(windows_size-1)/2):int(counter+(windows_size-1)/2+1),:-1]\n #a = h5py_data[(counter-(windows_size-1)/2):(counter+(windows_size-1)/2+1),:-1]\n if np.sum(h5py_data[int(counter),-1]):\n data_y[int(counter - (windows_size - 1) / 2)] = 1\n else:\n data_y[int(counter - (windows_size - 1) / 2)] = 0\n counter = counter + windows_size\n\n row_num = 0\n\n p_data_x = np.zeros((1,windows_size,26))\n p_data_y = np.zeros((1))\n n_data_x = np.zeros((1,windows_size,26))\n n_data_y = np.zeros((1))\n while row_num < data_y.shape[0]:\n if data_y[row_num]:\n p_data_x= np.append(p_data_x,data_x[row_num:row_num+1,:,:], axis = 0)\n p_data_y= np.append(p_data_y,data_y[row_num:row_num+1], axis=0)\n else:\n n_data_x= np.append(n_data_x,data_x[row_num:row_num+1,:,:], axis = 0)\n n_data_y= np.append(n_data_y,data_y[row_num:row_num+1], axis=0)\n row_num = row_num + 1\n p_data_x = p_data_x[1:,:,:]\n p_data_y = p_data_y[1:]\n n_data_x = n_data_x[1:,:,:]\n n_data_y = n_data_y[1:]\n\n p_row_num = p_data_y.shape[0]\n # print(p_data_y)\n\n p_data_x = np.append(p_data_x, n_data_x[:p_row_num,:,:], axis=0)\n p_data_y = np.append(p_data_y, n_data_y[:p_row_num], axis=0)\n\n os.chdir(output_Path)\n if p_row_num :\n f=h5py.File(str(windows_size)+\"x25_\"+h5py_file_name,\"w\")\n f.create_dataset('train_x', data = p_data_x)\n f.create_dataset('train_y', data = p_data_y)\n os.chdir(h5py_Path)\n\n", "sub_path": "A_间隔式读取氨基酸矩阵25x25.py", "file_name": "A_间隔式读取氨基酸矩阵25x25.py", "file_ext": "py", "file_size_in_byte": 2358, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.chdir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 59, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 61, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 63, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "349824424", "text": "\nimport csv\nimport os\nfrom collections import OrderedDict\n\ndef data_dir():\n p = os.path.realpath(__file__)\n return os.path.abspath(os.path.join(p, os.pardir, os.pardir, \"data\"))\n \nclass OrderedDictReader:\n def __init__(self, f, 
fieldnames=None, restkey=None, restval=None,\n dialect=\"excel\", *args, **kwds):\n self._fieldnames = fieldnames # list of keys for the dict\n self.restkey = restkey # key to catch long rows\n self.restval = restval # default value for short rows\n self.reader = csv.reader(f, dialect, *args, **kwds)\n self.dialect = dialect\n self.line_num = 0\n\n def __iter__(self):\n return self\n\n @property\n def fieldnames(self):\n if self._fieldnames is None:\n try:\n self._fieldnames = self.reader.next()\n except StopIteration:\n pass\n self.line_num = self.reader.line_num\n return self._fieldnames\n\n @fieldnames.setter\n def fieldnames(self, value):\n self._fieldnames = value\n\n def next(self):\n if self.line_num == 0:\n # Used only for its side effect.\n self.fieldnames\n row = self.reader.next()\n self.line_num = self.reader.line_num\n\n # unlike the basic reader, we prefer not to return blanks,\n # because we will typically wind up with a dict full of None\n # values\n while row == []:\n row = self.reader.next()\n d = OrderedDict(zip(self.fieldnames, row))\n lf = len(self.fieldnames)\n lr = len(row)\n if lf < lr:\n d[self.restkey] = row[lf:]\n elif lf > lr:\n for key in self.fieldnames[lr:]:\n d[key] = self.restval\n return d\n\n\nclass DataChannel(object):\n\n def __init__(self, data, count=10, interval=1, line_num=0):\n self.data = data\n self.line_num = line_num\n self.count = count\n self.interval = interval\n \n def __iter__(self):\n return self\n \n def get_current_line_num(self):\n return self.line_num\n \n def next(self):\n d = []\n \n for n in range(self.count):\n dd = self.data[self.line_num]\n self.line_num = self.line_num + 1\n if self.line_num >= len (self.data):\n self.line_num = 0\n \n d.append(dd)\n \n return d\n \nimport json\n \ndef get_metadata(sep='.'):\n meta = os.path.join(data_dir(), \"meta.json\")\n d = {}\n with open(meta, 'rU') as data_file: \n data = json.load(data_file)\n for n,v in data.items():\n for nn, vv in v.items():\n for vvv in vv:\n d[vvv] = \"%s%s%s%s%s\" % (n, sep, nn, sep, vvv)\n data_file.close()\n \n return d\n \n\nclass AlertEventChannel(object):\n\n def __init__(self, data, count=10, interval=1, line_num=0):\n self.data = data\n self.line_num = line_num\n self.count = count\n self.interval = interval\n \n def get_current_line_num(self):\n return self.line_num\n \n def __iter__(self):\n return self\n \n def next(self):\n d = []\n \n for n in range(self.count):\n dd = self.data[self.line_num]\n self.line_num = self.line_num + 1\n if self.line_num >= len (self.data):\n self.line_num = 0\n \n d.append(dd)\n \n return d\n \n\n", "sub_path": "bin/uaserver.py", "file_name": "uaserver.py", "file_ext": "py", "file_size_in_byte": 3543, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.realpath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 8, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 16, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, 
{"api_name": "json.load", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "41370361", "text": "import tkinter as tk\r\nimport math\r\nimport random\r\nfrom tkinter.font import Font\r\nfrom PIL import Image, ImageTk\r\n\r\n\r\nclass AppView(tk.Canvas):\r\n\r\n def __init__(self, master, width, height):\r\n self._master = master\r\n # Width of screen\r\n self._width = width\r\n # Height of screen\r\n self._height = height\r\n tk.Canvas.__init__(self, master, width=self._width,\r\n height=self._height, bg=\"#ffe6f9\")\r\n\r\n self._images = []\r\n self._images.append(ImageTk.PhotoImage(Image.open(\r\n \"./images/water.png\").resize((50, 50), Image.ANTIALIAS)))\r\n self._images.append(ImageTk.PhotoImage(Image.open(\r\n \"./images/sand.png\").resize((50, 50), Image.ANTIALIAS)))\r\n self._images.append(ImageTk.PhotoImage(Image.open(\r\n \"./images/grass.png\").resize((50, 50), Image.ANTIALIAS)))\r\n self._images.append(ImageTk.PhotoImage(Image.open(\r\n \"./images/snow.png\").resize((50, 50), Image.ANTIALIAS)))\r\n self._images.append(ImageTk.PhotoImage(Image.open(\r\n \"./images/moon.png\").resize((70, 70), Image.ANTIALIAS)))\r\n\r\n self._props_fill = \"sky blue\"\r\n self._seed_fill = \"tan2\"\r\n self._text_font = Font(family=\"Consolas\", size=18)\r\n\r\n '''\r\n Variables for measurements for view\r\n '''\r\n\r\n # Spacing from right and left\r\n self._right_space = 40\r\n self._left_space = 80\r\n\r\n # Spacing from top and bottom and divide\r\n self._top_space = 40\r\n self._bottom_space = 80\r\n self._divide_space = 80\r\n\r\n # Length of lower vert\r\n self._length_lower_vert = (\r\n self._height-(self._top_space+self._bottom_space+self._divide_space))/4\r\n # Length of upper vert\r\n self._length_upper_vert = 3 * \\\r\n ((self._height-(self._top_space+self._bottom_space+self._divide_space))/4)\r\n # Length of horizontal width\r\n self._length_horiz = self._width-(self._left_space+self._right_space)\r\n\r\n # Start and end of width pos\r\n self._start_width = self._width-(self._length_horiz+self._right_space)\r\n self._end_width = self._width-self._right_space\r\n\r\n # Start and end of upper pos\r\n self._start_upper_vert = self._height - \\\r\n (self._bottom_space+self._length_lower_vert +\r\n self._divide_space+self._length_upper_vert)\r\n self._end_upper_vert = self._height - \\\r\n (self._bottom_space+self._length_lower_vert+self._divide_space)\r\n\r\n # Start and end of lower pos\r\n self._start_lower_vert = self._height - \\\r\n (self._bottom_space+self._length_lower_vert)\r\n self._end_lower_vert = self._height-self._bottom_space\r\n\r\n # Spacing between horizontal markers\r\n self._dw_markers = self._length_horiz/5\r\n # Spacing between vertical markers\r\n self._dh_lower = self._length_lower_vert/5\r\n self._dh_upper = self._length_upper_vert/5\r\n\r\n def draw_icons(self):\r\n pass\r\n\r\n def draw_graph(self):\r\n self.delete(\"graph\")\r\n\r\n # Horizontal and Vertical axes for lower\r\n self.create_line(self._left_space, self._end_lower_vert, self._end_width+1, self._end_lower_vert,\r\n fill=self._props_fill, tag=\"graph\", width=2)\r\n self.create_line(self._left_space, self._start_lower_vert-1, self._left_space, self._end_lower_vert,\r\n fill=self._props_fill, tag=\"graph\", width=2)\r\n\r\n # Horizontal and Vertical markers for lower\r\n for x in range(6):\r\n self.create_line(self._left_space+self._dw_markers*x, self._end_lower_vert, self._left_space+self._dw_markers*x, self._end_lower_vert+5,\r\n fill=self._props_fill, tag=\"graph\", width=2)\r\n for y in 
range(6):\r\n self.create_line(self._left_space, self._end_lower_vert-self._dh_lower*y, self._left_space-5, self._end_lower_vert-self._dh_lower*y,\r\n fill=self._props_fill, tag=\"graph\", width=2)\r\n\r\n # Horizontal and Vertical markers for upper\r\n for x in range(6):\r\n self.create_line(self._left_space+self._dw_markers*x, self._end_upper_vert, self._left_space+self._dw_markers*x, self._end_upper_vert+5,\r\n fill=self._props_fill, tag=\"graph\", width=2)\r\n for y in range(6):\r\n self.create_line(self._left_space, self._end_upper_vert-self._dh_upper*y, self._left_space-5, self._end_upper_vert-self._dh_upper*y,\r\n fill=self._props_fill, tag=\"graph\", width=2)\r\n\r\n # Draw upper images\r\n for i in range(len(self._images)):\r\n self.create_image(self._left_space/2, self._end_upper_vert -\r\n (self._dh_upper*i) - self._dh_upper/2, image=self._images[i], tag=\"graph\")\r\n\r\n # Horizontal and Vertical axes for upper\r\n self.create_line(self._left_space, self._end_upper_vert, self._end_width+1, self._end_upper_vert,\r\n fill=self._props_fill, tag=\"graph\", width=2)\r\n self.create_line(self._left_space, self._start_upper_vert-1, self._left_space, self._end_upper_vert,\r\n fill=self._props_fill, tag=\"graph\", width=2)\r\n\r\n # Text to screen\r\n\r\n self.create_text(self._width/2, self._end_upper_vert+self._top_space,\r\n text=\"Generated Seed\", font=self._text_font, fill=self._seed_fill, tag=\"graph\")\r\n\r\n self.create_text(self._width/2, self._top_space/2, text=\"Generated Terrain\",\r\n font=self._text_font, fill=self._seed_fill, tag=\"graph\")\r\n\r\n def refresh_view(self, perlin_noise):\r\n seed_length = perlin_noise.get_seed_length()\r\n seed = perlin_noise.get_seed()\r\n generations = perlin_noise.get_octaves()\r\n num_octaves = perlin_noise.get_octave_count()\r\n\r\n self.draw_graph()\r\n self.draw_seed(seed_length, seed)\r\n self.draw_terrain(seed_length, generations, num_octaves)\r\n\r\n def draw_seed(self, seed_length, seed):\r\n self._dw_seed = self._length_horiz/(seed_length-1)\r\n self.delete(\"seed\")\r\n\r\n # Draw seed to screen\r\n for x in range(seed_length):\r\n if x != seed_length-1:\r\n self.create_line(\r\n self._left_space+self._dw_seed*x, self._end_lower_vert -\r\n seed[x]*self._length_lower_vert,\r\n self._left_space+self._dw_seed*(x+1), self._end_lower_vert -\r\n seed[x+1]*self._length_lower_vert,\r\n tag=\"seed\", fill=self._seed_fill, width=1.5, smooth=True)\r\n\r\n def draw_terrain(self, seed_length, generations, num_octaves):\r\n self.delete(\"terrain\")\r\n\r\n # Draw terrain to screen\r\n dx = self._length_horiz/(seed_length-1)\r\n for octave in range(num_octaves):\r\n for x in range(seed_length):\r\n if x != seed_length-1:\r\n self.create_line(self._left_space+dx*x, self._end_upper_vert-generations[octave][x]*self._length_upper_vert, self._left_space+dx*(\r\n x+1), self._end_upper_vert-generations[octave][x+1]*self._length_upper_vert, tag=(\"terrain\", \"octave-{}\".format(octave)), width=1.5, fill=self._seed_fill, smooth=True)\r\n\r\n def refresh_octaves(self, seed_length, generations, num_octaves, change):\r\n if change == 1:\r\n dx = self._length_horiz/(seed_length-1)\r\n for x in range(seed_length):\r\n if x != seed_length-1:\r\n self.create_line(self._left_space+dx*x, self._end_upper_vert-generations[num_octaves-1][x]*self._length_upper_vert, self._left_space+dx*(\r\n x+1), self._end_upper_vert-generations[num_octaves-1][x+1]*self._length_upper_vert, tag=(\"terrain\", \"octave-{}\".format(num_octaves-1)), width=1.5, 
fill=self._seed_fill, smooth=True)\r\n else:\r\n self.delete(\"octave-{}\".format(num_octaves))\r\n", "sub_path": "src/view.py", "file_name": "view.py", "file_ext": "py", "file_size_in_byte": 8033, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "tkinter.Canvas", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tkinter.Canvas.__init__", "line_number": 16, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 16, "usage_type": "attribute"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 20, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 20, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 20, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 20, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 21, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 21, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 22, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 22, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 23, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 23, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 24, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 24, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 25, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 26, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 27, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 27, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 28, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 28, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 29, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 29, "usage_type": "name"}, {"api_name": "tkinter.font.Font", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "458083834", "text": "# -----------------------------------------------------------------------------\n# Copyright (c) 2020 Nicolas P. Rougier. 
All rights reserved.\n# Distributed under the (new) BSD License.\n# -----------------------------------------------------------------------------\n# This example shows how to display a mesh\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom mpl3d import glm\nfrom mpl3d.mesh import Mesh\nfrom mpl3d.camera import Camera\n\n# Simplified wavefront loader (only vertices and faces)\ndef obj_load(filename):\n V, Vi = [], []\n with open(filename) as f:\n for line in f.readlines():\n if line.startswith('#'): continue\n values = line.split()\n if not values: continue\n if values[0] == 'v':\n V.append([float(x) for x in values[1:4]])\n elif values[0] == 'f' :\n Vi.append([int(x) for x in values[1:4]])\n return np.array(V), np.array(Vi)-1\n\n\n\n# --- main --------------------------------------------------------------------\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n\n fig = plt.figure(figsize=(4,4))\n ax = fig.add_axes([0,0,1,1], xlim=[-1,+1], ylim=[-1,+1], aspect=1)\n ax.axis(\"off\")\n\n camera = Camera(\"ortho\", scale=2)\n vertices, faces = obj_load(\"data/bunny.obj\")\n vertices = glm.fit_unit_cube(vertices)\n mesh = Mesh(ax, camera.transform, vertices, faces,\n cmap=plt.get_cmap(\"magma\"), edgecolors=(0,0,0,0.25))\n camera.connect(ax, mesh.update)\n plt.savefig(\"bunny.png\", dpi=600)\n plt.show()\n", "sub_path": "bunny.py", "file_name": "bunny.py", "file_ext": "py", "file_size_in_byte": 1603, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "mpl3d.camera.Camera", "line_number": 36, "usage_type": "call"}, {"api_name": "mpl3d.glm.fit_unit_cube", "line_number": 38, "usage_type": "call"}, {"api_name": "mpl3d.glm", "line_number": 38, "usage_type": "name"}, {"api_name": "mpl3d.mesh.Mesh", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.get_cmap", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "531768912", "text": "#!/usr/bin/env python\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import Twist, Pose2D\nimport cv2, cv_bridge\nimport numpy as np\n\nclass Follower:\n def __init__(self):\n self.bridge = cv_bridge.CvBridge()\n self.image_sub = rospy.Subscriber('camera/rgb/image_raw', Image, self.image_callback)\n self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\n\n def image_callback(self, msg):\n # Convert the image message to openCV type, scale size.\n image = self.bridge.imgmsg_to_cv2(msg,desired_encoding='bgr8')\n (h, w) = image.shape[:2]\n image_resized = cv2.resize(image, (w/4,h/4))\n\n # Convert to the HSV colour space.\n hsv = cv2.cvtColor(image_resized, cv2.COLOR_BGR2HSV)\n \n # Colour slicing to identify green objects\n # Colour of green object is [60, 159, 82]\n lower_green = np.array([50, 140, 75])\n upper_green = np.array([70, 170, 100])\n\n mask = 
cv2.inRange(hsv, lower_green, upper_green)\n\n        # If there are multiple green objects/targets, only follow one:\n        # keep the one closest to the image centre and discard the rest.\n        num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(mask)\n\n        # Need the image width to find the centre of the TurtleBot's view.\n        _,w,_ = hsv.shape\n\n\n        if num_labels > 1:\n            # Iterate over labels, keep the one closest to the centre.\n            x_centroids = centroids[1:,0]\n            min_index = np.argmin(abs(x_centroids - w/2))\n            best_label = min_index + 1\n\n            # Keep only best_label, remove others from image.\n            mask = np.where(labels==best_label, np.uint8(255), np.uint8(0))\n            obj_centroid = centroids[best_label, 0]\n        else:\n            # If there is only one label, then there is only background;\n            # No object to beacon towards, so exit early. Need other behaviour\n            # to move until an object comes into view.\n            cv2.imshow(\"Mask\", mask)\n            cv2.imshow(\"Masked image\", cv2.bitwise_and(hsv,hsv, mask=mask))\n            cv2.waitKey(3)\n            return\n\n        # Now the mask only contains the chosen target.\n        # Can use the centroids already provided from connectedComponents\n        \n        \n        target = cv2.bitwise_and(hsv, hsv, mask=mask)\n\n        # Show current image\n        cv2.imshow(\"Mask\", mask)\n        cv2.imshow(\"Masked image\", target)\n        cv2.waitKey(3)\n\n        \n        twist_msg = Twist()\n\n        # Implement a proportional controller to beacon towards it\n        err = obj_centroid - w/2\n        twist_msg.linear.x = 0.2\n        twist_msg.angular.z = -float(err)/400\n        self.pub.publish(twist_msg)\n        \n\n\nrospy.init_node('follower')\nfollower = Follower()\nrospy.spin()", "sub_path": "src/followbot/scripts/closest_centroid.py", "file_name": "closest_centroid.py", "file_ext": "py", "file_size_in_byte": 2663, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "cv_bridge.CvBridge", "line_number": 10, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 11, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 11, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 12, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 12, "usage_type": "argument"}, {"api_name": "cv2.resize", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.connectedComponentsWithStats", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 66, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 69, "usage_type": 
"call"}, {"api_name": "rospy.init_node", "line_number": 79, "usage_type": "call"}, {"api_name": "rospy.spin", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "402894743", "text": "\"\"\"\n This spider is a HarrishillUK spider created on top of the ATSSpider\n scrapy crawl harrishill -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.harrishill.co.uk/search/charity-jobs/any-jobtype/any-category/any-location/any-salary\"\n\n sample job url:\n http://www.harrishill.co.uk/Jobs/NS/HQ00029104/Permanent/Individual-Giving-and-Legacy-Manager\n\"\"\"\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix\n\n\nclass HarrishillUK(ATSSpider):\n\n name = \"harrishill\"\n\n def parse(self, response):\n selector = Selector(response)\n jobs = selector.xpath(\n '//div[@class=\"SearchResults\"]/div[@class=\"SearchItem\"]'\n )\n for job in jobs:\n job_url = job.xpath(\n './div[@class=\"Description\"]/a[@class=\"Header\"]/@href'\n ).extract()\n if job_url:\n meta = {\n 'title': job.xpath(\n './div[@class=\"Description\"]/a[@class=\"Header\"]/text()'\n ).extract(),\n 'jobcategory': job.xpath(\n './/span[contains(@id, \"_lblJobCategory\")]/text()'\n ).extract(),\n 'salary': job.xpath(\n './/span[contains(@id, \"_lblSalary\")]/text()'\n ).extract(),\n 'location': job.xpath(\n './/span[contains(@id, \"_lblLocation\")]/text()'\n ).extract(),\n 'ref_num': job.xpath(\n './/span[contains(@id, \"_lblJobRef\")]/text()'\n ).extract(),\n }\n yield Request(\n callback=self.parse_job_callback(),\n meta=meta,\n url=urljoin(response.url, job_url[0])\n )\n\n next_page_url = selector.xpath(\n '//a[contains(@id, \"_hplNext\")]/@href').extract()\n if next_page_url:\n yield Request(\n callback=self.parse,\n url=urljoin(response.url, next_page_url[0])\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n\n loader.add_xpath('description', '//div[@class=\"Description\"]/span')\n\n loader.add_value(\n 'referencenumber', response.meta.get('ref_num'),\n Prefix('%s-' % self.name)\n )\n loader.add_value('baseSalary', response.meta.get('salary'))\n loader.add_value('jobcategory', response.meta.get('jobcategory'))\n loader.add_value('location', response.meta.get('location'))\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('url', response.url)\n\n yield loader.load_item()\n\n", "sub_path": "brightcorp/brightcorp/spiders/harrishill.py", "file_name": "harrishill.py", "file_ext": "py", "file_size_in_byte": 2950, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "brightcorp.base.atsspiders.ATSSpider", "line_number": 18, "usage_type": "name"}, {"api_name": "scrapy.selector.Selector", "line_number": 23, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 49, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 52, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 58, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 60, "usage_type": "call"}, {"api_name": "brightcorp.items.BrightcorpItemLoader", "line_number": 64, "usage_type": "call"}, {"api_name": "brightcorp.processors.Prefix", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "266455150", "text": "# STD\nimport os\nimport time\n\n# MISC\nimport numpy as 
np\n\n# DL-framework\nimport torch\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom torchvision import transforms\nfrom torchvision.utils import make_grid\nfrom torch.nn import functional as F\n\n# MODULES\nfrom src.segmentation.fast_scnn import FastSCNN\n\n# Visualizer\nfrom src.common.visu import Visualizer\n\n__all__ = [\"Network\"]\n\n\ndef wrap(s, length, hard=False):\n if len(s) < length:\n return s + \" \" * (length - len(s))\n if len(s) > length and hard:\n return s[:length]\n return s\n\n\nclass Network(LightningModule):\n def __init__(self, exp, env):\n super().__init__()\n self._epoch_start_time = time.time()\n self._exp = exp\n self._env = env\n self.hparams[\"lr\"] = self._exp[\"lr\"]\n print(self._exp)\n self.model = FastSCNN(**self._exp[\"seg\"][\"cfg\"])\n\n p_visu = os.path.join(self._exp[\"name\"], \"visu\")\n self._output_transform = transforms.Compose(\n [\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]\n )\n self.visualizer = Visualizer(\n p_visu=p_visu, logger=None, num_classes=self._exp[\"seg\"][\"cfg\"][\"num_classes\"] + 1\n )\n self._mode = \"train\"\n\n self._plot_images = {\"train\": 0, \"val\": 0, \"test\": 0}\n self._plot_images_max = {\"train\": 2, \"val\": 2, \"test\": 2}\n\n def forward(self, batch, **kwargs):\n return self.model(batch)\n\n def on_train_epoch_start(self):\n self._mode = \"train\"\n for k in self._plot_images.keys():\n self._plot_images[k] = 0\n\n def on_train_start(self):\n print(\"Start\")\n self.visualizer.logger = self.logger\n\n def on_epoch_start(self):\n self.visualizer.epoch = self.current_epoch\n\n def training_step(self, batch, batch_idx):\n real = self._output_transform(batch[0])\n render = self._output_transform(batch[1])\n target = batch[2]\n synthetic = batch[3]\n\n BS, C, H, W = real.shape\n inp = torch.cat([real, render], dim=1)\n\n outputs = self(batch=inp)\n loss = F.cross_entropy(outputs[0], target, ignore_index=-1, reduction=\"none\").mean(\n dim=(1, 2)\n )\n pred = torch.argmax(outputs[0], 1)\n\n # LOG\n self.plot(batch[0], batch[1], pred, target)\n\n # COMPUTE STATISTICS\n\n acc = (pred == target).sum() / (BS * H * W)\n\n TN = ((pred == 0) * (target == 0)).sum().float()\n FP = ((pred == 1) * (target == 0)).sum().float()\n TP = ((pred == 1) * (target == 1)).sum().float()\n FN = ((pred == 0) * (target == 1)).sum().float()\n s = (TN + FP + TP + FN).float()\n TN /= s\n FP /= s\n TP /= s\n FN /= s\n self.log(f\"{self._mode}_TN_ratio\", TN, on_step=False, on_epoch=True)\n self.log(f\"{self._mode}_TP_ratio\", TP, on_step=False, on_epoch=True)\n self.log(f\"{self._mode}_FN_ratio\", FN, on_step=False, on_epoch=True)\n self.log(f\"{self._mode}_FP_ratio\", FP, on_step=False, on_epoch=True)\n\n self.log(f\"{self._mode}_acc\", acc, on_step=False, on_epoch=True)\n self.log(f\"{self._mode}_loss\", loss.mean(), on_step=False, on_epoch=True)\n\n if synthetic.sum() > 0:\n self.log(\n f\"{self._mode}_render_loss\",\n loss[synthetic].mean(),\n on_step=False,\n on_epoch=True,\n )\n\n non_synthetic = synthetic == False\n if non_synthetic.sum() > 0:\n self.log(\n f\"{self._mode}_real_loss\",\n loss[non_synthetic].mean(),\n on_step=False,\n on_epoch=True,\n )\n\n loss = loss.mean()\n return {\"loss\": loss}\n\n def plot(self, ori_real, ori_render, pred, target):\n i = int(self._plot_images[self._mode])\n self.visualizer.plot_image(\n tag=\"abc\",\n img=np.uint8(np.random.randint(0, 255, (100, 100, 3))),\n method=\"default\",\n )\n\n if self._plot_images[self._mode] < 
self._plot_images_max[self._mode]:\n self._plot_images[self._mode] += 1\n print(\"PERFORM PLOT\")\n BS = pred.shape[0]\n rows = int(BS ** 0.5)\n grid_target = make_grid(\n target[:, None].repeat(1, 3, 1, 1),\n nrow=rows,\n padding=2,\n scale_each=False,\n pad_value=2,\n )\n grid_pred = make_grid(\n pred[:, None].repeat(1, 3, 1, 1),\n nrow=rows,\n padding=2,\n scale_each=False,\n pad_value=2,\n )\n\n grid_ori_real = make_grid(\n ori_real, nrow=rows, padding=2, scale_each=False, pad_value=0\n )\n grid_ori_render = make_grid(\n ori_render, nrow=rows, padding=2, scale_each=False, pad_value=0\n )\n\n self.visualizer.plot_segmentation(label=grid_target[0], method=\"right\")\n self.visualizer.plot_segmentation(\n label=grid_pred[0], method=\"left\", tag=f\"{self._mode}_Left_Pred__GT_right_{i}\"\n )\n\n self.visualizer.plot_image(img=grid_ori_real, method=\"right\")\n self.visualizer.plot_segmentation(\n label=grid_pred[0],\n method=\"left\",\n tag=f\"{self._mode}_Left_Pred__Right_Image_{i}\",\n )\n\n self.visualizer.plot_image(\n img=torch.cat([grid_ori_real, grid_ori_render], dim=2), method=\"right\"\n )\n self.visualizer.plot_segmentation(\n label=grid_pred[0],\n method=\"left\",\n tag=f\"{self._mode}_Left_Pred__Right_Composed-Image_{i}\",\n )\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n return self.training_step(batch, batch_idx)\n\n def on_validation_epoch_start(self):\n self._mode = \"val\"\n for k in self._plot_images.keys():\n self._plot_images[k] = 0\n\n def on_test_epoch_start(self):\n self._mode = \"test\"\n for k in self._plot_images.keys():\n self._plot_images[k] = 0\n\n def test_step(self, batch, batch_idx):\n return self.training_step(batch, batch_idx)\n\n def configure_optimizers(self):\n if self._exp[\"optimizer\"][\"name\"] == \"ADAM\":\n optimizer = torch.optim.Adam(\n [{\"params\": self.model.parameters()}], lr=self.hparams[\"lr\"]\n )\n elif self._exp[\"optimizer\"][\"name\"] == \"SGD\":\n optimizer = torch.optim.SGD(\n [{\"params\": self.model.parameters()}],\n lr=self.hparams[\"lr\"],\n **self._exp[\"optimizer\"][\"sgd_cfg\"],\n )\n elif self._exp[\"optimizer\"][\"name\"] == \"WADAM\":\n optimizer = torch.optim.AdamW(\n self.model.parameters(),\n lr=self.hparams[\"lr\"],\n **self._exp[\"optimizer\"][\"wadam_cfg\"],\n )\n else:\n raise Exception\n\n if self._exp.get(\"lr_scheduler\", {}).get(\"active\", False):\n if self._exp[\"lr_scheduler\"][\"name\"] == \"POLY\":\n # polynomial lr-scheduler\n init_lr = self.hparams[\"lr\"]\n max_epochs = self._exp[\"lr_scheduler\"][\"poly_cfg\"][\"max_epochs\"]\n target_lr = self._exp[\"lr_scheduler\"][\"poly_cfg\"][\"target_lr\"]\n power = self._exp[\"lr_scheduler\"][\"poly_cfg\"][\"power\"]\n lambda_lr = (\n lambda epoch: (\n ((max_epochs - min(max_epochs, epoch)) / max_epochs) ** (power)\n )\n + (1 - (((max_epochs - min(max_epochs, epoch)) / max_epochs) ** (power)))\n * target_lr\n / init_lr\n )\n scheduler = torch.optim.lr_scheduler.LambdaLR(\n optimizer, lambda_lr, last_epoch=-1, verbose=True\n )\n elif self._exp[\"lr_scheduler\"][\"name\"] == \"OneCycleLR\":\n num_steps = self._exp[\"lr_scheduler\"][\"onecyclelr_cfg\"][\"num_steps\"]\n\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer,\n max_lr=self.hparams[\"lr\"],\n total_steps=num_steps + 100,\n pct_start=0.05,\n cycle_momentum=False,\n anneal_strategy=\"linear\",\n )\n\n lr_scheduler = {\"scheduler\": scheduler, \"interval\": \"step\"}\n\n ret = {\"optimizer\": optimizer, \"lr_scheduler\": lr_scheduler}\n else:\n ret = [optimizer]\n return 
ret\n", "sub_path": "src/segmentation/lightning_seg/lightning_seg.py", "file_name": "lightning_seg.py", "file_ext": "py", "file_size_in_byte": 7630, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pytorch_lightning.core.lightning.LightningModule", "line_number": 32, "usage_type": "name"}, {"api_name": "time.time", "line_number": 35, "usage_type": "call"}, {"api_name": "src.segmentation.fast_scnn.FastSCNN", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 45, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 45, "usage_type": "name"}, {"api_name": "src.common.visu.Visualizer", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.argmax", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 134, "usage_type": "attribute"}, {"api_name": "torchvision.utils.make_grid", "line_number": 143, "usage_type": "call"}, {"api_name": "torchvision.utils.make_grid", "line_number": 150, "usage_type": "call"}, {"api_name": "torchvision.utils.make_grid", "line_number": 158, "usage_type": "call"}, {"api_name": "torchvision.utils.make_grid", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 204, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 208, "usage_type": "attribute"}, {"api_name": "torch.optim.AdamW", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 214, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.LambdaLR", "line_number": 237, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 237, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.OneCycleLR", "line_number": 243, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 243, "usage_type": "attribute"}]} +{"seq_id": "545380558", "text": "'''库:aid1811,集合:stuinfo,文档:唐伯虎'''\nimport pymongo\n\ndatabase = 'AID1811'\ntable = 'stuinfo'\n\n# 创建连接对象\nconn = pymongo.MongoClient('localhost',27017)\n# 创建库对象\ndb = conn[database]\n# 创建集合对象\nmyset = db[table]\n# 执行插入语句\nmyset.insert_one({'name':'秋香'})\n\n# >>>show dbs\n# >>>use aid1811\n# >>>show collections\n# >>>db.stuinfo.find().pretty()\n# >>>db.stuinfo.count()\n# >>>db.dropDatabase()\n\n\n\n\n\n\n", "sub_path": "第二天/day02/day02/04_pymongo回顾.py", "file_name": "04_pymongo回顾.py", "file_ext": "py", "file_size_in_byte": 446, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": 
"pymongo.MongoClient", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "521931084", "text": "\n# coding: utf-8\n\n# In[1]:\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport xgboost as xgb\nget_ipython().magic('matplotlib inline')\n\n\n# In[2]:\n\ntrain = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\n\n\n# In[3]:\n\ntrain = train.drop(['Ticket', 'Cabin'], axis=1)\ntest = test.drop(['Ticket', 'Cabin'], axis=1)\n\n\n# In[4]:\n\ncombine = [train, test]\n\n\n# In[5]:\n\nfor dataset in combine:\n dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\\.', expand=False)\n\npd.crosstab(train['Title'], train['Sex'])\n\n\n# In[6]:\n\npd.crosstab(test['Title'], train['Sex'])\n\n\n# In[7]:\n\nfor dataset in combine:\n dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Dr', 'Rev', 'Dona'], 'Rare')\n dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')\n dataset['Title'] = dataset['Title'].replace(['Capt', 'Col', 'Don', 'Jonkheer', 'Major', 'Sir'], 'Mr')\n\n\n# In[9]:\n\ntrain['Title'].unique()\n\n\n# In[10]:\n\ntrain.head()\n\n\n# In[11]:\n\ntest.head()\n\n\n# In[12]:\n\ntrain[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()\n\n\n# In[13]:\n\ntitle1 = pd.get_dummies(train['Title'], drop_first=True)\ntrain.drop(['Title'], axis=1, inplace=True)\ntrain = pd.concat([train, title1], axis=1)\n\n\n# In[14]:\n\ntitle2 = pd.get_dummies(test['Title'], drop_first=True)\ntest.drop(['Title'], axis=1, inplace=True)\ntest = pd.concat([test, title2], axis=1)\n\n\n# In[15]:\n\ntrain.head(2)\n\n\n# In[16]:\n\ntest.head(2)\n\n\n# In[17]:\n\ntrain = train.drop(['Name', 'PassengerId'], axis=1)\ntest = test.drop(['Name'], axis=1)\n\n\n# In[18]:\n\ncombine = [train, test]\ntrain.shape, test.shape\n\n\n# In[19]:\n\ntrain.head()\n\n\n# In[20]:\n\ntest.head()\n\n\n# In[21]:\n\nsex1 = pd.get_dummies(train['Sex'], drop_first=True)\ntrain.drop(['Sex'], axis=1, inplace=True)\ntrain = pd.concat([train, sex1], axis=1)\n\n\n# In[22]:\n\nsex2 = pd.get_dummies(test['Sex'], drop_first=True)\ntest.drop(['Sex'], axis=1, inplace=True)\ntest = pd.concat([test, sex2], axis=1)\n\n\n# In[23]:\n\ntrain.head()\n\n\n# In[24]:\n\ntest.head()\n\n\n# In[25]:\n\nguess_ages = np.zeros((2,3))\nguess_ages\n\n\n# In[26]:\n\nfor i in range(0,2):\n for j in range(0,3):\n guess_df1 = train[(train['male'] == i) & (train['Pclass'] == j+1)]['Age'].dropna()\n age_guess1 = guess_df1.median()\n guess_ages[i,j] = int(age_guess1/0.5 + 0.5) * 0.5\n \nfor i in range(0,2):\n for j in range(0,3):\n train.loc[(train.Age.isnull()) & (train.male == i) & (train.Pclass == j+1), 'Age'] = guess_ages[i,j]\n \ntrain['Age'] = train['Age'].astype(int)\ntrain.head()\n\n\n# In[27]:\n\nfor i in range(0,2):\n for j in range(0,3):\n guess_df2 = test[(test['male'] == i) & (test['Pclass'] == j+1)]['Age'].dropna()\n age_guess2 = guess_df2.median()\n guess_ages[i,j] = int(age_guess2/0.5 + 0.5) * 0.5\n \nfor i in range(0,2):\n for j in range(0,3):\n test.loc[(test.Age.isnull()) & (test.male == i) & (test.Pclass == j+1), 'Age'] = guess_ages[i,j]\n \ntest['Age'] = test['Age'].astype(int)\ntest.head()\n\n\n# In[28]:\n\nembarked1 = pd.get_dummies(train['Embarked'], drop_first=True)\ntrain.drop(['Embarked'], axis=1, inplace=True)\ntrain = pd.concat([train, embarked1], axis=1)\n\n\nembarked2 = pd.get_dummies(test['Embarked'], drop_first=True)\ntest.drop(['Embarked'], axis=1, inplace=True)\ntest = 
pd.concat([test, embarked2], axis=1)\n\n\n# In[29]:\n\ntrain.head(1)\n\n\n# In[30]:\n\ntest.head(1)\n\n\n# In[31]:\n\ntrain['FamilySize'] = train['SibSp'] + train['Parch'] +1\ntest['FamilySize'] = test['SibSp'] + test['Parch'] +1\n\n\n# In[32]:\n\ntrain.head(1)\n\n\n# In[33]:\n\ntrain[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)\n\n\n# In[34]:\n\ntrain['IsAlone'] = 0\ntrain.loc[train['FamilySize'] == 1, 'IsAlone'] = 1\ntest['IsAlone'] = 0\ntest.loc[test['FamilySize'] == 1, 'IsAlone'] = 1\n\ntrain[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()\n\n\n# In[35]:\n\ntrain = train.drop(['SibSp', 'Parch', 'FamilySize'], axis=1)\ntest = test.drop(['SibSp', 'Parch', 'FamilySize'], axis=1)\ncombine = [train, test]\n\n\n# In[36]:\n\npclass1 = pd.get_dummies(train['Pclass'], drop_first=True)\ntrain.drop(['Pclass'], axis=1, inplace=True)\ntrain = pd.concat([train, pclass1], axis=1)\n\n\n# In[37]:\n\ntrain.head(1)\n\n\n# In[38]:\n\npclass2 = pd.get_dummies(test['Pclass'], drop_first=True)\ntest.drop(['Pclass'], axis=1, inplace=True)\ntest = pd.concat([test, pclass2], axis=1)\n\n\n# In[39]:\n\ntest.head(1)\n\n\n# In[40]:\n\ntest['Fare'].fillna(test['Fare'].dropna().median(), inplace=True)\n\n\n# In[40]:\n\nX_train = train.drop(\"Survived\", axis=1)\ny_train = train[\"Survived\"]\nX_test = test.drop([\"PassengerId\", \"Master\"], axis=1)\nX_train.shape, y_train.shape, X_test.shape\n\n\n# In[41]:\n\nX_test.isnull().any()\n\n\n# In[41]:\n\ntrain.to_csv('Ftrain.csv', index=False)\ntest.to_csv('Ftest.csv', index=False)\n\n\n# In[150]:\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\n\n# In[151]:\n\nlr = LogisticRegression()\nlr.fit(X_train, y_train)\npred_lr = lr.predict(X_test)\nacc_lr = round(lr.score(X_train, y_train) * 100, 2)\nacc_lr\n\n\n# In[152]:\n\nsvc = SVC()\nsvc.fit(X_train, y_train)\npred_svc = svc.predict(X_test)\nacc_svc = round(svc.score(X_train, y_train) * 100, 2)\nacc_svc\n\n\n# In[153]:\n\nknn = KNeighborsClassifier(n_neighbors = 4, weights = 'distance')\nknn.fit(X_train, y_train)\npred_knn = knn.predict(X_test)\nacc_knn = round(knn.score(X_train, y_train) * 100, 2)\nacc_knn\n\n\n# In[154]:\n\ndt = DecisionTreeClassifier()\ndt.fit(X_train, y_train)\npred_dt = dt.predict(X_test)\nacc_dt = round(dt.score(X_train, y_train) * 100, 2)\nacc_dt\n\n\n# In[155]:\n\nrf = RandomForestClassifier(n_estimators=75)\nrf.fit(X_train, y_train)\npred_rf = rf.predict(X_test)\nacc_rf = round(rf.score(X_train, y_train) * 100, 2)\nacc_rf\n\n\n# In[156]:\n\npred_rf\n\n\n# In[157]:\n\nsubmission2 = pd.DataFrame({\"PassengerId\": test[\"PassengerId\"], \"Survived\": pred_rf})\nsubmission2.to_csv('C:/Users/karth/Data Science/Datasets/Titanic/submission2.csv', index=False)\n\n\n# In[158]:\n\nxgb_params = {\n 'eta': 0.05,\n 'max_depth': 5,\n 'subsample': 0.7,\n 'colsample_bytree': 0.7,\n 'objective': 'multi:softmax',\n 'num_class': 2,\n 'eval_metric': 'mlogloss',\n 'silent': 1\n}\n\n\n# In[ ]:\n\n\n\n\n# In[159]:\n\ndtrain = xgb.DMatrix(X_train, y_train)\ndtest = xgb.DMatrix(X_test)\n\n\n# In[160]:\n\ncv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=20, 
verbose_eval=100, show_stdv=False)\n\n\n# In[167]:\n\nmodel = xgb.XGBClassifier(max_depth=3, n_estimators=1000, learning_rate=0.01).fit(X_train, y_train)\npredictions = model.predict(X_test)\n\n\n# In[168]:\n\ny_predict = model.predict(X_test)\noutput = pd.DataFrame({\"PassengerId\": test[\"PassengerId\"], \"Survived\": y_predict})\noutput.head()\n\n\n# In[169]:\n\noutput.to_csv('xgbTitanic.csv', index=False)\n\n\n# In[ ]:\n\n\n\n", "sub_path": "Titanic.py", "file_name": "Titanic.py", "file_ext": "py", "file_size_in_byte": 7160, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.crosstab", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.crosstab", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 121, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 123, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 145, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 183, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 185, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 188, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 190, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 238, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 240, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 250, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 252, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 298, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 307, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 316, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 325, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 334, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 348, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 373, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 374, "usage_type": "call"}, {"api_name": "xgboost.cv", "line_number": 379, "usage_type": "call"}, {"api_name": "xgboost.XGBClassifier", "line_number": 384, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 391, "usage_type": "call"}]} +{"seq_id": "124618121", "text": "import matplotlib; matplotlib.use('QT5Agg')\nimport is_prime\nimport matplotlib.pyplot as plt\nimport time\nimport sys\n\n\ndef main(n):\n input = []\n output = []\n for i in range(2, n):\n input.append(i)\n output.append(X.pi(i))\n plt.xlabel('n')\n plt.ylabel('π(n)')\n plt.xlim(0, n)\n plt.ylim(0, n/3)\n plt.autoscale(False)\n plt.plot(input, output)\n\n\nnumber = 
int(sys.argv[1])\nstart_time = time.time()\nX = is_prime.IsPrime()\nX.gen_eratosthenes(number + 1)\nX.gen_pi()\nmain(number)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\nplt.show()\n", "sub_path": "2-6-sum.py", "file_name": "2-6-sum.py", "file_ext": "py", "file_size_in_byte": 578, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "matplotlib.use", "line_number": 1, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.autoscale", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 23, "usage_type": "call"}, {"api_name": "is_prime.IsPrime", "line_number": 24, "usage_type": "call"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "399120825", "text": "from unittest import TestCase\nimport os\nimport tempfile\nimport numpy as np\nfrom aequilibrae.paths import Graph\nfrom os.path import join\nimport sys\nfrom parameters_test import centroids\n\n# Adds the folder with the data to the path and collects the paths to the files\nlib_path = os.path.abspath(os.path.join('..', '../tests'))\nsys.path.append(lib_path)\nfrom data import path_test, test_graph, test_network\n\n\nclass TestGraph(TestCase):\n\n def test_create_from_geography(self):\n self.graph = Graph()\n self.graph.create_from_geography(\n test_network, 'link_id', 'dir', 'distance', centroids=centroids, skim_fields = [], anode=\"A_NODE\",\n bnode=\"B_NODE\")\n self.graph.set_graph(cost_field='distance', block_centroid_flows=True)\n\n def test_load_network_from_csv(self):\n pass\n\n def test_prepare_graph(self):\n self.test_create_from_geography()\n self.graph.prepare_graph(centroids)\n\n reference_graph = Graph()\n reference_graph.load_from_disk(test_graph)\n if not np.array_equal(self.graph.graph, reference_graph.graph):\n self.fail('Reference graph and newly-prepared graph are not equal')\n\n def test_set_graph(self):\n self.test_prepare_graph()\n self.graph.set_graph(cost_field='distance',block_centroid_flows=True)\n if self.graph.num_zones != centroids.shape[0]:\n self.fail('Number of centroids not properly set')\n if self.graph.num_links != 222:\n self.fail('Number of links not properly set')\n if self.graph.num_nodes != 93:\n self.fail('Number of nodes not properly set - ' + str(self.graph.num_nodes))\n\n def test_save_to_disk(self):\n self.test_create_from_geography()\n self.graph.save_to_disk(join(path_test, 
'aequilibrae_test_graph.aeg'))\n self.graph_id = self.graph.__id__\n self.graph_version = self.graph.__version__\n\n def test_load_from_disk(self):\n self.test_save_to_disk()\n reference_graph = Graph()\n reference_graph.load_from_disk(test_graph)\n\n new_graph = Graph()\n new_graph.load_from_disk(join(path_test, 'aequilibrae_test_graph.aeg'))\n\n comparisons = [('Graph', new_graph.graph, reference_graph.graph),\n ('b_nodes', new_graph.b_node, reference_graph.b_node),\n ('Forward-Star', new_graph.fs, reference_graph.fs),\n ('cost', new_graph.cost, reference_graph.cost),\n ('centroids', new_graph.centroids, reference_graph.centroids),\n ('skims', new_graph.skims, reference_graph.skims),\n ('link ids', new_graph.ids, reference_graph.ids),\n ('Network', new_graph.network, reference_graph.network),\n ('All Nodes', new_graph.all_nodes, reference_graph.all_nodes),\n ('Nodes to indices', new_graph.nodes_to_indices, reference_graph.nodes_to_indices)]\n\n for comparison, newg, refg in comparisons:\n if not np.array_equal(newg, refg):\n self.fail('Reference %s and %s created and saved to disk are not equal' %(comparison, comparison))\n\n comparisons = [('nodes', new_graph.num_nodes, reference_graph.num_nodes),\n ('links', new_graph.num_links, reference_graph.num_links),\n ('zones', new_graph.num_zones, reference_graph.num_zones),\n ('block through centroids', new_graph.block_centroid_flows, reference_graph.block_centroid_flows),\n ('Graph ID', new_graph.__id__, self.graph_id),\n ('Graph Version', new_graph.__version__, self.graph_version)]\n\n for comparison, newg, refg in comparisons:\n if newg != refg:\n self.fail('Reference %s and %s created and saved to disk are not equal' %(comparison, comparison))\n\n def test_reset_single_fields(self):\n pass\n\n def test_add_single_field(self):\n pass\n\n def test_available_skims(self):\n self.test_set_graph()\n if self.graph.available_skims() != ['distance']:\n self.fail('Skim availability with problems')\n", "sub_path": "tests/aequilibrae/paths/test_graph.py", "file_name": "test_graph.py", "file_ext": "py", "file_size_in_byte": 4203, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 16, "usage_type": "name"}, {"api_name": "aequilibrae.paths.Graph", "line_number": 19, "usage_type": "call"}, {"api_name": "data.test_network", "line_number": 21, "usage_type": "argument"}, {"api_name": "parameters_test.centroids", "line_number": 21, "usage_type": "name"}, {"api_name": "parameters_test.centroids", "line_number": 30, "usage_type": "argument"}, {"api_name": "aequilibrae.paths.Graph", "line_number": 32, "usage_type": "call"}, {"api_name": "data.test_graph", "line_number": 33, "usage_type": "argument"}, {"api_name": "numpy.array_equal", "line_number": 34, "usage_type": "call"}, {"api_name": "parameters_test.centroids.shape", "line_number": 40, "usage_type": "attribute"}, {"api_name": "parameters_test.centroids", "line_number": 40, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "data.path_test", "line_number": 49, "usage_type": "argument"}, 
{"api_name": "aequilibrae.paths.Graph", "line_number": 55, "usage_type": "call"}, {"api_name": "data.test_graph", "line_number": 56, "usage_type": "argument"}, {"api_name": "aequilibrae.paths.Graph", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "data.path_test", "line_number": 59, "usage_type": "argument"}, {"api_name": "numpy.array_equal", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "448803450", "text": "\n\n\nfrom sklearn.model_selection import KFold \nimport numpy as np\nimport pandas as pd\na = pd.read_csv('/home/liujl/data_9_1/pdb_list_3_new.txt',header=None,sep='\\s+')\n\na=a.values.tolist()\nfrom itertools import chain\na= list(chain.from_iterable(a))\na= np.array(a)\nkf = KFold(n_splits=5)\n\n\n\n\n\ntrain=[]\ntest=[]\ncount = 1\nfor train_index, test_index in kf.split(a):\n b=[]\n c=[]\n \n print(train_index,test_index)\n\n print(a[train_index],a[test_index])\n\n for i in range(len(a[train_index])):\n #print(a[train_index][i])\n letter=a[train_index][i]+ '_output.txt'\n #print(\"leter\",leter)\n b.append(letter)\n train.append(b)\n\n for j in range(len(a[test_index])):\n #print(a[test_index][j])\n letter=a[test_index][j]+ '_output.txt'\n #print(\"letter\",letter)\n c.append(letter)\n test.append(c)\n\n\ndel i,j,letter,b,c\n\nprint(len(train[0]))\n\n\n\n\nimport os\ndirPath = '/home/liujl/data_9_1/features_of_3_new'\n\nfor i in range(5):\n file_1 = open('/home/liujl/dis_trimer/'+ '%d'%(i+1)+'_data/'+'%d'%(i+1) + '_train.txt','w')\n file_2 = open('/home/liujl/dis_trimer/'+ '%d'%(i+1)+'_data/'+'%d'%(i+1) + '_test.txt','w')\n for filename in train[i]:\n filepath_1 = dirPath + '/'+filename\n for line in open(filepath_1):\n file_1.writelines(line)\n file_1.close()\n\n for filename in test[i]:\n filepath_2 = dirPath + '/'+filename\n for line in open(filepath_2):\n file_2.writelines(line)\n file_2.close()\n \n\n \n\n\n\n \n\n'''\nfor i in range(5):\n b(i)=train_index\n c(i)=test_index\n print(b(i))\n print('\\n')\n print(c(i))\n'''\n \n \n\n\n\n#print( kf.split( a ) )\n", "sub_path": "split.py", "file_name": "split.py", "file_ext": "py", "file_size_in_byte": 1566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 11, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "126547986", "text": "\"\"\"\nImplements a PacketSink, designed to record both arrival times and waiting times from the incoming\npackets.\n\nBy default, it records absolute arrival times, but it can also be initialized to record\ninter-arrival times.\n\"\"\"\nfrom collections import defaultdict as dd\n\nimport simpy\n\n\nclass PacketSink:\n \"\"\" A PacketSink is designed to record both arrival times and waiting times from the incoming\n packets. 
By default, it records absolute arrival times, but it can also be initialized to record\n    inter-arrival times.\n\n    Parameters\n    ----------\n    env: simpy.Environment\n        the simulation environment\n    rec_arrivals: bool\n        if True, arrivals will be recorded\n    absolute_arrivals: bool\n        if True, absolute arrival times will be recorded, otherwise the time between\n        consecutive arrivals is recorded.\n    rec_waits: bool\n        if True, the waiting times experienced by the packets are recorded\n    rec_flow_ids: bool\n        if True, the flow IDs of the packets are used as the index for recording;\n        otherwise, the 'src' field in the packets is used\n    debug: bool\n        If True, prints more verbose debug information.\n    \"\"\"\n\n    def __init__(self,\n                 env,\n                 rec_arrivals: bool = True,\n                 absolute_arrivals: bool = True,\n                 rec_waits: bool = True,\n                 rec_flow_ids: bool = True,\n                 debug: bool = False):\n        self.store = simpy.Store(env)\n        self.env = env\n        self.rec_waits = rec_waits\n        self.rec_flow_ids = rec_flow_ids\n        self.rec_arrivals = rec_arrivals\n        self.absolute_arrivals = absolute_arrivals\n        self.waits = dd(list)\n        self.arrivals = dd(list)\n        self.packets_received = dd(lambda: 0)\n        self.bytes_received = dd(lambda: 0)\n        self.packet_sizes = dd(list)\n        self.packet_times = dd(list)\n        self.perhop_times = dd(list)\n\n        self.first_arrival = dd(lambda: 0)\n        self.last_arrival = dd(lambda: 0)\n\n        self.debug = debug\n\n    def put(self, packet):\n        \"\"\" Sends a packet to this element. \"\"\"\n        now = self.env.now\n\n        if self.rec_flow_ids:\n            rec_index = packet.flow_id\n        else:\n            rec_index = packet.src\n\n        if self.rec_waits:\n            self.waits[rec_index].append(self.env.now - packet.time)\n            self.packet_sizes[rec_index].append(packet.size)\n            self.packet_times[rec_index].append(packet.time)\n            self.perhop_times[rec_index].append(packet.perhop_time)\n\n        if self.rec_arrivals:\n            self.arrivals[rec_index].append(now)\n            if len(self.arrivals[rec_index]) == 1:\n                self.first_arrival[rec_index] = now\n\n            if not self.absolute_arrivals:\n                self.arrivals[rec_index][\n                    -1] = now - self.last_arrival[rec_index]\n\n            self.last_arrival[rec_index] = now\n\n        if self.debug:\n            print("At time {:.2f}, packet {:d} arrived.".format(\n                now, packet.packet_id))\n            if self.rec_waits and len(self.packet_sizes[rec_index]) >= 10:\n                bytes_received = sum(self.packet_sizes[rec_index][-9:])\n                time_elapsed = self.env.now - (\n                    self.packet_times[rec_index][-10] +\n                    self.waits[rec_index][-10])\n                print(\n                    "Average throughput (last 10 packets): {:.2f} bytes/second."\n                    .format(float(bytes_received) / time_elapsed))\n\n        self.packets_received[rec_index] += 1\n        self.bytes_received[rec_index] += packet.size\n", "sub_path": "ns/packet/sink.py", "file_name": "sink.py", "file_ext": "py", "file_size_in_byte": 3637, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "simpy.Store", "line_number": 43, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 49, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 50, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 51, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 52, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 53, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 54, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 55, "usage_type": "call"}, {"api_name": "collections.defaultdict", 
"line_number": 57, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "67342229", "text": "import praw\n\nBOT_NAME = 'WWW Rule1 Bot'\nVERSION_NUMBER = '0.1'\nUSER_AGENT = BOT_NAME + ' ' + VERSION_NUMBER + ', a service script for this account by u/MysteriousHobo2.'\nSUBREDDIT = \"WhoWouldWin\"\nUSER_NAME = \"WWWMod\"\nMOD_RECIPIENTS = [\"nkonrad\", \"MysteriousHobo2\", \"IMadeThisOn6-28-2015\", \"British_Tea_Company\"]\nme= \"MysteriousHobo2\"\n\n\nprint('\\n|======================Logging in as u/{}...======================|'.format(USER_NAME))\n\n\nreddit = praw.Reddit(client_id=\"SBTHXZuem0KP6w\",\n\t\t\t\t\t client_secret=\"i_zJvSd3NrFeSsq1jkJktX1MCvo\", password='Be7Ab66cZ773b',\n\t\t\t\t\t user_agent=USER_AGENT, username=USER_NAME)\ndef notInMessages(body):\n for message in reddit.inbox.sent(limit=50):\n if body == message.body:\n return False\n return True\ndef rule1Lookup():\n for item in reddit.subreddit(SUBREDDIT).mod.reports(limit = 50):\n if \"Rule 1: Being Rude, Condescending, or Insulting\" in str(item.user_reports):\n message_body = \"\"\"Body: {}\n\nUser: {}\n\nLink: {}\"\"\".format(str(item.body), str(item.author), str(item.permalink))\n if notInMessages(message_body):\n for mod in MOD_RECIPIENTS:\n reddit.redditor(mod).message(\"Rule 1 violation found\", message_body)\nrule1Lookup()", "sub_path": "www_mod/rule1_bot.py", "file_name": "rule1_bot.py", "file_ext": "py", "file_size_in_byte": 1247, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "praw.Reddit", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "316732022", "text": "# modified from https://github.com/AndrewCarterUK/pascal-voc-writer\n\n################################################################################\n# MIT License\n#\n# Copyright (c) 2018 Andrew Carter\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n################################################################################\n\n\nimport os\nfrom jinja2 import Environment, FileSystemLoader\n\n\nclass Writer:\n def __init__(self, path, width, height, depth=3, database='Unknown', segmented=0):\n templateLoader = FileSystemLoader(searchpath=\"./\")\n environment = Environment(loader=templateLoader)\n self.annotation_template = environment.get_template('annotation.xml')\n\n self.template_parameters = {\n 'path': path,\n 'filename': path,\n 'folder': os.path.dirname(path),\n 'width': width,\n 'height': height,\n 'depth': depth,\n 'database': database,\n 'segmented': segmented,\n 'objects': []\n }\n\n def addObject(self, name, xmin, ymin, xmax, ymax, pose='Unspecified', truncated=0, difficult=0):\n self.template_parameters['objects'].append({\n 'name': name,\n 'xmin': xmin,\n 'ymin': ymin,\n 'xmax': xmax,\n 'ymax': ymax,\n 'pose': pose,\n 'truncated': truncated,\n 'difficult': difficult,\n })\n\n def save(self, annotation_path):\n with open(annotation_path, 'w') as file:\n content = self.annotation_template.render(**self.template_parameters)\n file.write(content)\n", "sub_path": "pascal_voc_writer.py", "file_name": "pascal_voc_writer.py", "file_ext": "py", "file_size_in_byte": 2623, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "jinja2.FileSystemLoader", "line_number": 34, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}]} +{"seq_id": "192452403", "text": "import weakref\n\nimport pytest\nimport torch\n\nfrom torchgpipe.dependency import Fork, Join\n\n\ndef test_phony():\n x = torch.zeros(0, requires_grad=True)\n\n y, phony = Fork.apply(x)\n z, phony2 = Fork.apply(y)\n\n # Fork doesn't modify the given tensor.\n assert y.data_ptr() == x.data_ptr()\n\n # Phony tensors have no space.\n assert phony.size() == (0,)\n\n # Phony storages should be cached.\n assert phony2.data_ptr() == phony.data_ptr()\n\n # Phony tensors should not be cached.\n assert phony2 is not phony\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason='cuda required')\ndef test_fork_join():\n logs = []\n\n class Log(torch.autograd.Function):\n @staticmethod\n def forward(ctx, number, tensor):\n ctx.number = number\n return tensor.detach()\n\n @staticmethod\n def backward(ctx, grad):\n logs.append(ctx.number)\n return None, grad\n\n a = torch.rand(1, device='cpu', requires_grad=True)\n b = torch.rand(1, device='cuda', requires_grad=True)\n\n a = Log.apply(1, a)\n\n a, phony = Fork.apply(a)\n b = Join.apply(a, phony)\n\n b = Log.apply(2, b)\n b = b.to('cpu')\n\n (a+b).backward()\n\n assert logs == [2, 1]\n\n\ndef test_fork_leak():\n leak = None\n\n class F(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input):\n return input\n\n @staticmethod\n def backward(ctx, grad):\n nonlocal leak\n leak = weakref.ref(ctx)\n return grad\n\n x = torch.rand(1, requires_grad=True)\n x = F.apply(x)\n x, phony = Fork.apply(x)\n x = Join.apply(x, phony)\n\n x.backward()\n del x, phony\n\n assert leak() is None\n", "sub_path": 
"tests/test_dependency.py", "file_name": "test_dependency.py", "file_ext": "py", "file_size_in_byte": 1710, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "torch.zeros", "line_number": 10, "usage_type": "call"}, {"api_name": "torchgpipe.dependency.Fork.apply", "line_number": 12, "usage_type": "call"}, {"api_name": "torchgpipe.dependency.Fork", "line_number": 12, "usage_type": "name"}, {"api_name": "torchgpipe.dependency.Fork.apply", "line_number": 13, "usage_type": "call"}, {"api_name": "torchgpipe.dependency.Fork", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.autograd", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 44, "usage_type": "call"}, {"api_name": "torchgpipe.dependency.Fork.apply", "line_number": 48, "usage_type": "call"}, {"api_name": "torchgpipe.dependency.Fork", "line_number": 48, "usage_type": "name"}, {"api_name": "torchgpipe.dependency.Join.apply", "line_number": 49, "usage_type": "call"}, {"api_name": "torchgpipe.dependency.Join", "line_number": 49, "usage_type": "name"}, {"api_name": "pytest.mark.skipif", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.autograd", "line_number": 62, "usage_type": "attribute"}, {"api_name": "weakref.ref", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 73, "usage_type": "call"}, {"api_name": "torchgpipe.dependency.Fork.apply", "line_number": 75, "usage_type": "call"}, {"api_name": "torchgpipe.dependency.Fork", "line_number": 75, "usage_type": "name"}, {"api_name": "torchgpipe.dependency.Join.apply", "line_number": 76, "usage_type": "call"}, {"api_name": "torchgpipe.dependency.Join", "line_number": 76, "usage_type": "name"}]} +{"seq_id": "2515649", "text": "import requests\n\n\nclass Client:\n def __init__(self):\n self._self_signed = False\n self._endpoint = 'https://appwrite.io/v1'\n self._global_headers = {\n 'content-type': '',\n 'x-sdk-version': 'appwrite:python:0.0.3',\n }\n\n def set_self_signed(self, status=True):\n self._self_signed = status\n return self\n\n def set_endpoint(self, endpoint):\n self._endpoint = endpoint\n return self\n\n def add_header(self, key, value):\n self._global_headers[key.lower()] = value.lower()\n return self\n\n def set_project(self, value):\n \"\"\"Your Appwrite project ID\"\"\"\n\n self._global_headers['x-appwrite-project'] = value.lower()\n return self\n\n def set_key(self, value):\n \"\"\"Your Appwrite project secret key\"\"\"\n\n self._global_headers['x-appwrite-key'] = value.lower()\n return self\n\n def set_locale(self, value):\n self._global_headers['x-appwrite-locale'] = value.lower()\n return self\n\n def set_mode(self, value):\n self._global_headers['x-appwrite-mode'] = value.lower()\n return self\n\n def call(self, method, path='', headers=None, params=None):\n if headers is None:\n headers = {}\n\n if params is None:\n params = {}\n\n data = {}\n json = {}\n \n self._global_headers.update(headers)\n\n if method != 'get':\n data = params\n params = {}\n\n if headers['content-type'] == 'application/json':\n json = data\n data = {}\n\n response = requests.request( # call method dynamically 
https://stackoverflow.com/a/4246075/2299554\n method=method,\n url=self._endpoint + path,\n params=params,\n data=data,\n json=json,\n headers=self._global_headers,\n verify=self._self_signed,\n )\n\n response.raise_for_status()\n \n return response.json()\n", "sub_path": "app/sdks/python/appwrite/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1999, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.request", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "28101515", "text": "import typing\n\nfrom PyQt5 import QtCore\n\n\nclass ExposureTimesModel(QtCore.QAbstractItemModel):\n def __init__(self, parent=None, default_exptime=300, default_iterations = 1):\n super().__init__(parent)\n self._samples={}\n self.default_exptime = default_exptime\n self.default_iterations = default_iterations\n\n def addSample(self, samplename):\n if samplename not in self._samples:\n self.beginResetModel()\n self._samples[samplename] = [False, self.default_exptime, self.default_iterations]\n self.endResetModel()\n\n def removeSample(self, samplename):\n if samplename in self._samples:\n self.beginResetModel()\n del self._samples[samplename]\n self.endResetModel()\n\n def rowCount(self, parent: QtCore.QModelIndex = ...):\n return len(self._samples)\n\n def columnCount(self, parent: QtCore.QModelIndex = ...):\n return 3\n\n def index(self, row: int, column: int, parent: QtCore.QModelIndex = ...):\n return self.createIndex(row, column, None)\n\n def parent(self, child: QtCore.QModelIndex):\n return QtCore.QModelIndex()\n\n def data(self, index: QtCore.QModelIndex, role: int = ...):\n samplename = sorted(self._samples.keys())[index.row()]\n if index.column()==0:\n if role == QtCore.Qt.CheckStateRole:\n return [QtCore.Qt.Unchecked, QtCore.Qt.Checked][self._samples[samplename][0]]\n elif role == QtCore.Qt.DisplayRole:\n return samplename\n else:\n return None\n else:\n if role == QtCore.Qt.DisplayRole:\n return str(self._samples[samplename][index.column()])\n elif role == QtCore.Qt.EditRole:\n return self._samples[samplename][index.column()]\n else:\n return None\n\n def headerData(self, section: int, orientation: QtCore.Qt.Orientation, role: int = ...):\n if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n return ['Sample name', 'Exposure time', 'Iterations'][section]\n return None\n\n def flags(self, index: QtCore.QModelIndex):\n samplename = sorted(self._samples.keys())[index.row()]\n flags = QtCore.Qt.ItemNeverHasChildren\n if index.column()==0:\n flags |= QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled\n else:\n flags |= QtCore.Qt.ItemIsEditable\n if self._samples[samplename][0]:\n flags |= QtCore.Qt.ItemIsEnabled\n return flags\n\n def setData(self, index: QtCore.QModelIndex, value: typing.Any, role: int = ...):\n samplename = sorted(self._samples.keys())[index.row()]\n if role == QtCore.Qt.EditRole:\n assert isinstance(value, (float, int))\n self._samples[samplename][index.column()] = value\n self.dataChanged.emit(self.index(index.row(), index.column()), self.index(index.row(), index.column()))\n return True\n elif role == QtCore.Qt.CheckStateRole:\n assert index.column()==0\n self._samples[samplename][0]=value>0\n self.dataChanged.emit(self.index(index.row(), 0), self.index(index.row(), 3))\n return True\n return False\n\n def __contains__(self, samplename):\n return samplename in self._samples\n\n def __iter__(self):\n for sam in sorted(self._samples):\n if 
self._samples[sam][0]:\n yield sam, self._samples[sam][1], self._samples[sam][2]\n return", "sub_path": "cct/qtgui/measurement/scripteditor/samplesexposuretimesmodel.py", "file_name": "samplesexposuretimesmodel.py", "file_ext": "py", "file_size_in_byte": 3571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "PyQt5.QtCore.QAbstractItemModel", "line_number": 6, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 6, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QModelIndex", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 25, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QModelIndex", "line_number": 28, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 28, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QModelIndex", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 31, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QModelIndex", "line_number": 34, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 34, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QModelIndex", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 35, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QModelIndex", "line_number": 37, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 37, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 40, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 41, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 42, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 42, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 47, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 47, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 49, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 49, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 54, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 55, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 55, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QModelIndex", "line_number": 59, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 59, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 61, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 61, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 63, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 63, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 65, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 65, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 67, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 67, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QModelIndex", "line_number": 70, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 70, "usage_type": "attribute"}, {"api_name": 
"PyQt5.QtCore.Qt", "line_number": 72, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 72, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 77, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "66030128", "text": "import discord\nimport requests\nimport shelve\nfrom helpers import get_gif, parse, dir_path\n\ncommands = [\"sad\", \"qwq\"]\nrequires_mention = False\naccepts_mention = True\ndescription = \":(\"\n\n\nasync def execute(message):\n embed = discord.Embed()\n if len(message.mentions) == 1:\n # 1 mention\n embed.description = f\"Sad {message.mentions[0].mention} qwq\"\n shv = shelve.open(\"sad_config.config\")\n if str(message.mentions[0].id) in shv:\n gif = shv[str(message.mentions[0].id)]\n shv.close()\n else:\n shv.close()\n await message.channel.send(f\"{message.mentions[0].mention} muss zuerst mit `` +setsad `` ein Bild für den +sad Befehl hinzufügen.\")\n return\n elif len(message.mentions) > 1:\n # > 1 mentions\n await message.channel.send(\"Dieser Befehl funktioniert nur für einzelne Mentions.\")\n return\n else:\n # 0 mentions\n embed.description = f\"Sad {message.author.mention} qwq\"\n shv = shelve.open(\"sad_config.config\")\n if str(message.author.id) in shv:\n gif = shv[str(message.author.id)]\n shv.close()\n else:\n shv.close()\n await message.channel.send(\"Bitte füge zuerst mit `` +setsad `` ein Bild für deinen +sad Befehl hinzu.\")\n return\n\n embed.set_image(url=gif)\n await message.channel.send(embed=embed)\n", "sub_path": "actions/sad.py", "file_name": "sad.py", "file_ext": "py", "file_size_in_byte": 1421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "discord.Embed", "line_number": 13, "usage_type": "call"}, {"api_name": "shelve.open", "line_number": 17, "usage_type": "call"}, {"api_name": "shelve.open", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "393658427", "text": "import h5py \nfrom keras.utils.io_utils import HDF5Matrix\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\n\nclass AugumentedHDF5Matrix(HDF5Matrix):\n \"\"\"Wraps HDF5Matrixs with image augumentation.\"\"\"\n\n def __init__(self, image_datagen, seed, *args, **kwargs):\n self.image_datagen = image_datagen\n self.seed = seed\n self.i = 0\n super().__init__(*args, **kwargs)\n\n def __getitem__(self, key):\n x = super().__getitem__(key)\n self.i += 1\n x = x[:,:,:]\n# bgr = x[:,:,0:3]\n# rgb = bgr[...,::-1]\n# if len(x.shape) == 3:\n# return self.image_datagen.random_transform(\n# rgb, seed=self.seed + self.i)\n# else:\n return np.array([\n self.image_datagen.random_transform(\n xx, seed=self.seed + self.i) for xx in x\n ])\n\nclass DataSet:\n classes = ['compact-highrise',\n 'compact-midrise',\n 'compact-lowrise',\n 'open-high-rise',\n 'open-midrise',\n 'open-lowrise',\n 'lightweight-lowrise',\n 'large-lowrise',\n 'sparsely-built',\n 'heavy-industry',\n 'dense-trees',\n 'scattered-trees',\n 'bush-and-scrub',\n 'low-plants',\n 'bare-rock-or-paved',\n 'bare-soild-or-sand',\n 'water']\n\n data_gen_args = dict( \n rotation_range=90.\n # width_shift_range=0.05,\n # height_shift_range=0.05,\n # zoom_range=0.2,\n # channel_shift_range=0.005,\n # horizontal_flip=True,\n # vertical_flip=True,\n # fill_mode='constant',\n # data_format=\"channels_last\",\n )\n\n def __init__(self, path, classes=None):\n self.data_s1, self.data_s2, self.labels = self.load_data_fromh5(path)\n\n def 
load_data_fromh5(self, path):\n image_datagen = ImageDataGenerator(**self.data_gen_args)\n data_sen1 = AugumentedHDF5Matrix(image_datagen, 0, path,'sen1')\n data_sen2 = AugumentedHDF5Matrix(image_datagen, 0, path,'sen2')\n labels = HDF5Matrix(path,'labels')\n return data_sen1, data_sen2, labels\n\n def generator(self,\n shuffle=True, \n seed = 10,\n batch_size = 64,\n augment = True):\n # target_size=(112,112),\n # color_mode='RGB',\n # preprocessing = True,\n # augmentation = False):\n\n if augment:\n image_datagen = ImageDataGenerator(**self.data_gen_args)\n else: \n image_datagen = ImageDataGenerator()\n\n image_generator = image_datagen.flow(\n self.data_s2, y=self.labels, \n shuffle=shuffle,\n seed=seed, \n batch_size=batch_size,\n )\n\n return image_generator\n", "sub_path": "src/data_generator.py", "file_name": "data_generator.py", "file_ext": "py", "file_size_in_byte": 2908, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "keras.utils.io_utils.HDF5Matrix", "line_number": 6, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.utils.io_utils.HDF5Matrix", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "46763809", "text": "import sys\nimport numpy as np\nimport copy\nfrom scipy.stats import gmean\nfrom scipy import signal\nfrom scipy import optimize\nimport matplotlib.pyplot as plt\nfrom thunderfish.dataloader import open_data\nfrom thunderfish.peakdetection import detect_peaks\nfrom scipy.interpolate import interp1d\nfrom scipy.signal import savgol_filter\nfrom collections import deque\nimport nixio as nix\nimport time\nimport os\nimport seaborn as sns\n\ndeltat = 60.0 # seconds of buffer size\nthresh = 0.05\nmind = 0.1 # minimum distance between peaks\npeakwidththresh = 30 # maximum distance between max(peak) and min(trough) of a peak, in datapoints\nnew = 1\n \ndef main():\n \n filepath = sys.argv[1]\n default_nochetime = \"2000\"\n default_diatime = \"0600\"\n import ntpath\n ntpath.basename(\"a/b/c\")\n\n def path_leaf(path):\n head, tail = ntpath.split(path)\n return tail or ntpath.basename(head)\n\n filename = path_leaf(filepath) \n\n filename_datestart = 5\n filename_dateend = filename_datestart + 6\n date = filename[filename_datestart:filename_dateend]\n if date[0] == '4':\n date = '7' + date[1:]\n elif date[0] == '3':\n date = '6' + date[1:]\n\n filename_locend = filename_dateend + 6\n filename_nameend = -4\n location = filename[filename_dateend:filename_locend]\n if len(filename[:filename_nameend])>filename_locend:\n timeformat_len = len(filename[filename_locend:filename_nameend])\n filename_timeend = filename_locend + timeformat_len\n time = filename[filename_locend:filename_timeend]\n if timeformat_len == 3:\n time = '0' + time\n elif timeformat_len != 4:\n print('weird timeformat? 
: ', time)\n else:\n if 'Noche' in filepath or 'noche' in filepath:\n time = default_nochetime\n elif 'Dia' in filepath or 'dia' in filepath:\n time = default_diatime\n else:\n print('Time unknown')\n time = 0\n \n print('date: ', date, 'starttime: ', time) \n \n eods = np.load(filepath, mmap_mode='r')\n #eodsload = np.load(filepath, mmap_mode='r')\n #eods = np.empty_like(eodsload[:,:len(eodsload[0])//10])\n #eods[0] = eodsload[0][:len(eodsload[0])//10]\n #eods[2] = eodsload[2][:len(eodsload[0])//10]\n ## npArrays of Peakx Peaky Peakh Peakcl\n \n print(len(eods[0]))\n #eods = np.memmap(filename, dtype='float32', mode='r', shape=(4,20000))\n print(eods[0])\n\n eods_isis = np.empty([len(eods[0])])\n eods_isis[:-1] = np.diff(eods[0])\n \n if 0 in eods_isis:\n print('zero at', np.where(eods_isis == 0))\n print(eods_isis)\n next = 0\n for i in range(len(eods_isis)):\n if i < next:\n continue\n if eods_isis[i] == 0 and len(eods_isis)>i+2:\n \n eods_isis[i-1]=0\n eods_isis[i+1]= 0\n \n next = i+3\n print('eods0 where isis == 0', eods[0][eods_isis == 0])\n print('len, uniquelen', len(eods[0]), len(np.unique(eods[0])))\n \n #print(np.diff(eods[0])[-10:-1], eods_isis[-10:-1])\n eods_isis[-1] = eods_isis[-2]\n samplerate = 32000\n \n #print('0stellen in isis: ', np.where(eods_isis==0))\n eods_freq = samplerate/eods_isis #1/s\n\n #plt.plot(eods_freq)\n \n #print(eods_isis[-10:-1], eods_freq)\n max_fishisi = 5000\n # print('Classes at end: ', np.unique(classlist))\n #intervals = find_offtime(eods,eods_freq, 5, samplerate)\n #print(intervals.shape)\n #print('interval0',intervals[0])\n \n home = os.path.expanduser('~')\n os.chdir(home)\n path = filename[5:-4]+\"/\" \n # if not os.path.exists(path):\n # os.makedirs(path)\n # np.save(filename[:-4]+\"/offtime_\"+filename[:-3]+\"npy\", intervals)\n \n with open_data(path+filename[5:-4]+\".WAV\", 0, deltat, 0.0, ) as data:\n #data = data[:eods[0][-1]]\n freqavgsecpath = path + 'freq_avg_sec.mmap'\n #freqtime = np.arange(0, len(data), samplerate)\n if new == 1 or not os.path.exists(freqavgsecpath):\n avg_freqs_temp = []\n peak_ind = 0\n sec = 0\n for i in np.arange(0,len(data), samplerate):\n count_peaks_in_second = 0\n sec +=1\n count_peaks_in_second = ((i < eods[0]) & (eods[0] < i+samplerate)).sum()\n print('freq', i/samplerate,'/', len(data)/samplerate, count_peaks_in_second)\n avg_freqs_temp.append(count_peaks_in_second.astype(int))\n #avg_freqs.append(np.mean(eods_freq[i:i+samplerate]))\n avg_freqs = np.memmap(freqavgsecpath, dtype='int', mode='w+', shape=(len(avg_freqs_temp)))\n avg_freqs[:] = np.array(avg_freqs_temp)\n else:\n avg_freqs = np.memmap(freqavgsecpath, dtype='int', mode='r', shape=(len(data)//samplerate))\n print('avg_freqs after' , avg_freqs[0]) \n #plt.plot(avg_freqs)\n # plt.xlabel('seconds')\n # plt.ylabel('frequency of peaks')\n # plt.show()\n print(avg_freqs[0])\n ampavgsecpath = path + 'amp_avg_sec.mmap'\n #freqtime = np.arange(0, len(data), samplerate)\n if new == 1 or not os.path.exists(ampavgsecpath):\n avg_amps_temp = []\n peak_ind = 0\n\n for i in np.arange(0,len(data), samplerate):\n count_amp_in_second = 0\n count_amp_in_second = eods[2][(i < eods[0]) & (eods[0] < i+samplerate)]\n count_amp_in_second = np.percentile(count_amp_in_second, 80) if len(count_amp_in_second ) >0 else 0\n print('amp', i/samplerate, '/', len(data)/samplerate, end = ' ')\n avg_amps_temp.append(count_amp_in_second)\n \n print('avg_amps_temp', avg_amps_temp)\n avg_amps = np.memmap(ampavgsecpath, dtype='float32', mode='w+', shape=(len(avg_amps_temp)))\n 
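# NOTE: assigning into this mode='w+' memmap persists the per-second amplitude percentiles\n # to ampavgsecpath on disk, so a later run can reuse them through the mode='r' branch below\n # instead of rescanning the whole recording; the next lines scrub NaNs to 0.0 before writing.\n 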
avg_amps_temp = np.array(avg_amps_temp)\n avg_amps_temp[np.where(np.isnan(avg_amps_temp))] = 0.0\n avg_amps[:] = avg_amps_temp\n print('avg_amps ',avg_amps)\n #avg_freqs.append(np.mean(eods_freq[i:i+samplerate]))\n else:\n avg_amps = np.memmap(ampavgsecpath, dtype='float32', mode='r', shape=(len(data)//samplerate))\n \n print(ampavgsecpath)\n print('avg_amps')\n \n print(avg_amps)\n print(avg_amps[-1])\n #plt.plot(avg_amps, label = 'amplitudes')\n \n amp, ampax = plt.subplots()\n freqax = ampax.twinx()\n \n l1 = ampax.plot(avg_amps, label = 'amplitude', color = 'red')\n l2 = freqax.plot(avg_freqs, label = 'frequency', color = 'green')\n \n ls = l1+l2\n labels = [l.get_label() for l in ls]\n ampax.legend(ls, labels, loc=1)\n ampax.set_xlabel('seconds')\n ampax.set_ylabel('amplitude of peaks')\n freqax.set_ylabel('freqency of peaks')\n \n pathFreqAmp = path + 'allpeaks_FreqAmp_'+filename[5:-4]+'.png' \n plt.xlim(0,len(avg_freqs))\n plt.savefig(pathFreqAmp)\n plt.show()\n plt.close()\n # datx = int(eods[0][301510])\n # print('datx', datx)\n # #plt.plot(range(datx-2000,datx+1000),data[datx-2000:datx+1000])\n # xx = eods[0][301510-10:301510+10]\n # yy = eods[1][301510-10:301510+10]\n # xxdat = data[xx.astype(int)]\n # dataa = data[range(xx[0].astype(int),xx[-1].astype(int))]\n # dataa = savgol_filter(dataa, 11, 7) \n # plt.plot(range(xx[0].astype(int),xx[-1].astype(int)), dataa)\n # plt.plot(xx,xxdat)\n # plt.scatter(xx, yy)\n # #plt.scatter(xx,hh)\n # plt.show()\n \n \n # a = 300000\n # b = 302000\n # dat = data[eods[0][a]:eods[0][b]]\n # dat = savgol_filter(data[eods[0][a]:eods[0][b]], 11, 7)\n # datpoints = dat[eods[0][a:b].astype(int)-eods[0][a].astype(int)]\n # plt.scatter(eods[0][a:b],datpoints, color = 'red')\n # plt.plot(range(eods[0][a].astype(int),eods[0][b].astype(int)), dat)\n # plt.scatter(eods[0][a:b],eods[1][a:b])\n # plt.show()\n \n # on_time = np.arange(0,len(data),samplerate)\n # on_time[:] = True\n # for sec, freq in enumerate(avg_freqs):\n # if freq < 10:\n # on_time[sec] = False\n \n \n on_intervals, near_intervals, far_intervals = find_offtime(avg_amps, 10, 0.2)\n on_times = np.full(len(avg_amps), np.nan)\n near_times = np.full(len(avg_amps), np.nan)\n far_times = np.full(len(avg_amps), np.nan)\n for intv in on_intervals:\n on_times[intv[0]:intv[1]]=1\n for intv in near_intervals:\n near_times[intv[0]:intv[1]]=1\n for intv in far_intervals:\n far_times[intv[0]:intv[1]]=1\n\n on_times[np.where(avg_freqs < 5)] = np.nan\n near_times[np.where(avg_freqs < 5)]=np.nan\n far_times[np.where(avg_freqs < 5)]=np.nan\n\n \n np.savez(path + 'ontime' +filename[5:-4]+'.npz', on = on_times, near = near_times, far = far_times)\n\n print('offtimes', on_times)\n \n plt.fill_between(range(len(on_times)), on_times, color = '#1e2c3c', label = 'close') #'#324A64'\n plt.fill_between(range(len(near_times)), near_times, color = '#324A64', label = 'nearby')\n plt.fill_between(range(len(far_times)), far_times, color = '#8ea0b4', label = 'far')\n plt.xlabel('seconds')\n plt.ylabel('position')\n plt.legend(loc = 1)\n plt.ylim(0,1.5)\n plt.xlim(0,len(avg_freqs))\n #plt.show()\n pathOntime = path + 'ontime' +filename[5:-4]+'.png'\n plt.savefig(pathOntime)\n print('plotted')\n \n# \n# #print('ontime',on_time)\n# for intv in intervals:\n# a = intv[2] # intervalstart x\n# b= intv[3] # intervalend x\n# t0 = int(a/samplerate)\n# t1= int(b/samplerate)\n# print('false sould be on ', t0, ' to ', t1)\n# for i in range(t0, t1):\n# on_time[i] = False\n# print('ontime false',np.where(on_time == 0))\n# 
plt.plot(on_time)\n# print(on_time)\n# plt.show()\n# \n# for intv in intervals:\n# \n# a = intv[0]\n# b = intv[1]\n# #a = 300000\n# #b = 301000\n# dat = data[eods[0][a]:eods[0][b]]\n# dat = data[eods[0][a]:eods[0][b]]\n# datpoints = dat[eods[0][a:b].astype(int)-eods[0][a].astype(int)]\n# plt.scatter(eods[0][a:b],datpoints, color = 'red')\n# plt.plot(range(eods[0][a].astype(int),eods[0][b].astype(int)), dat)\n# plt.scatter(eods[0][a:b],eods[1][a:b])\n# plt.show()\n# olda = a\n# oldb = b\n# \n# a = intv[0]-10\n# b = intv[1]+10\n# dat = data[eods[0][a]:eods[0][b]]\n# dat = savgol_filter(data[eods[0][a]:eods[0][b]], 11, 7)\n# datpoints = dat[eods[0][a:b].astype(int)-eods[0][a].astype(int)]\n# plt.scatter(eods[0][a:b],datpoints, color = 'red')\n# plt.plot(range(eods[0][a].astype(int),eods[0][b].astype(int)), dat)\n# plt.scatter(eods[0][a:b],eods[1][a:b])\n# plt.show()\n# \n#\n \n\n\n\n\n\n\n\n \n\n# invs =find_offtime_amp(data)\n# \n# \n#\n#\n#def find_offtime_amp(data):\n# if len(data) > 100000:\n# min = np.mean(data[0:100000])\n# \n# else: print('ah cmon, data too short?')\n# partlen = 100000\n# for part in range(len(data))/partlen:\n# if np.mean(np.abs(data[part*partlen:(part+1)*partlen])) < min:\n# min = np.mean(np.abs(data[part*partlen:(part+1)*partlen]))\n# for part in range(len(data))/(0.5*partlen):\n# if np.mean(np.abs(data[part*partlen:(part+1)*partlen])) < min * 2:\n# print('off at ', part*partlen)\n# \n\n\n\ndef find_foraging(ampavg, mintime, minamp):\n #Einheit timestep\n off_mark = np.zeros([len(ampavg)])\n for step, amp in enumerate(ampavg):\n if amp >= minamp:\n off_mark[step]=1\n if minamp > amp > 0.05:\n off_mark[step]=2\n if 0.05 > amp > 0:\n off_mark[step]=3\n # plt.plot(off_mark)\n # plt.title('Offmarks')\n # plt.show()\n openstart = False\n counter = 0\n secondcounter = 0\n intervals = []\n intervals2 = []\n intervals3 = []\n find_intervals(intervals, off_mark, 1, mintime)\n find_intervals(intervals2, off_mark, 2, mintime)\n find_intervals(intervals3, off_mark, 3, mintime)\n \n#for i,m in enumerate(off_mark):\n# if m == 1:\n# counter += 1\n# secondcounter = 0\n# if not openstart and counter == mintime:\n# start = i-counter+1\n# openstart = True\n# if m ==0 or m == 2:\n# secondcounter +=1\n# counter = 0\n# if secondcounter == mintime:\n# if openstart:\n# end = i-mintime+1\n# interval = [start, end]\n# intervals.append(interval)\n# openstart = False\n# counter = 0\n#else:\n# if openstart == True:\n# end = i-secondcounter\n# interval = [start, end]\n# intervals.append(interval)\n#\n#openstart = 0\n#counter = 0\n#secondcounter = 0\n#for i,m in enumerate(off_mark):\n# if m == 2:\n# counter += 1\n# secondcounter = 0\n# if not openstart and counter == mintime:\n# start = i-counter+1\n# openstart = True\n# if m ==0 or m == 1:\n# secondcounter +=1\n# counter = 0\n# if secondcounter == mintime:\n# if openstart:\n# end = i-mintime+1\n# interval = [start, end]\n# intervals2.append(interval)\n# openstart = False\n# counter = 0\n#else:\n# if openstart == True:\n# end = i-secondcounter\n# interval = [start, end]\n# intervals2.append(interval)\n# \n#\n print(intervals)\n if len(intervals) == 0:\n print('no strict ontime found')\n for s, e in intervals:\n print('on time interval: (', s, ',', e, ',', e-s , ' )') \n return np.array(intervals), np.array(intervals2), np.array(intervals3)\n\n\ndef find_offtime(ampavg, mintime, minamp):\n #Einheit timestep\n off_mark = np.zeros([len(ampavg)])\n for step, amp in enumerate(ampavg):\n if amp >= minamp:\n off_mark[step]=1\n if minamp > amp > 0.05:\n 
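# marks encode amplitude bands: 1 = amp >= minamp (plotted as 'close' in main()),\n # 2 = 0.05 < amp < minamp ('nearby'), 3 = 0 < amp < 0.05 ('far'); find_intervals()\n # below then collects runs of at least mintime consecutive steps of each mark.\n 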
off_mark[step]=2\n if 0.05 > amp > 0:\n off_mark[step]=3\n # plt.plot(off_mark)\n # plt.title('Offmarks')\n # plt.show()\n openstart = False\n counter = 0\n secondcounter = 0\n intervals = []\n intervals2 = []\n intervals3 = []\n find_intervals(intervals, off_mark, 1, mintime)\n find_intervals(intervals2, off_mark, 2, mintime)\n find_intervals(intervals3, off_mark, 3, mintime)\n \n#for i,m in enumerate(off_mark):\n# if m == 1:\n# counter += 1\n# secondcounter = 0\n# if not openstart and counter == mintime:\n# start = i-counter+1\n# openstart = True\n# if m ==0 or m == 2:\n# secondcounter +=1\n# counter = 0\n# if secondcounter == mintime:\n# if openstart:\n# end = i-mintime+1\n# interval = [start, end]\n# intervals.append(interval)\n# openstart = False\n# counter = 0\n#else:\n# if openstart == True:\n# end = i-secondcounter\n# interval = [start, end]\n# intervals.append(interval)\n#\n#openstart = 0\n#counter = 0\n#secondcounter = 0\n#for i,m in enumerate(off_mark):\n# if m == 2:\n# counter += 1\n# secondcounter = 0\n# if not openstart and counter == mintime:\n# start = i-counter+1\n# openstart = True\n# if m ==0 or m == 1:\n# secondcounter +=1\n# counter = 0\n# if secondcounter == mintime:\n# if openstart:\n# end = i-mintime+1\n# interval = [start, end]\n# intervals2.append(interval)\n# openstart = False\n# counter = 0\n#else:\n# if openstart == True:\n# end = i-secondcounter\n# interval = [start, end]\n# intervals2.append(interval)\n# \n#\n print(intervals)\n if len(intervals) == 0:\n print('no strict ontime found')\n for s, e in intervals:\n print('on time interval: (', s, ',', e, ',', e-s , ' )') \n return np.array(intervals), np.array(intervals2), np.array(intervals3)\n\n\ndef find_intervals(intervals, marks, mark, mintime):\n openstart = 0\n counter = 0\n secondcounter = 0\n for i,m in enumerate(marks):\n if m == mark:\n counter += 1\n secondcounter = 0\n if not openstart and counter == mintime:\n start = i-counter+1\n openstart = True\n if m != mark:\n secondcounter +=1\n counter = 0\n if secondcounter == mintime:\n if openstart:\n end = i-mintime+1\n interval = [start, end]\n intervals.append(interval)\n openstart = False\n counter = 0\n else:\n if openstart == True:\n end = i-secondcounter\n interval = [start, end]\n intervals.append(interval)\n\n\n \n \n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "analyze_ontime.py", "file_name": "analyze_ontime.py", "file_ext": "py", "file_size_in_byte": 16886, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ntpath.basename", "line_number": 30, "usage_type": "call"}, {"api_name": "ntpath.split", "line_number": 33, "usage_type": "call"}, {"api_name": "ntpath.basename", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 115, "usage_type": "call"}, {"api_name": "thunderfish.dataloader.open_data", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.exists", 
"line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.memmap", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.memmap", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.memmap", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.memmap", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "numpy.full", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 227, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 228, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 229, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 237, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 238, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 239, "usage_type": "attribute"}, {"api_name": "numpy.savez", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 247, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 248, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 249, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 250, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 250, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 410, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 491, "usage_type": "call"}]} +{"seq_id": "585112773", "text": "from collections import Counter, OrderedDict\r\nfrom functools import reduce\r\n\r\nclass Graph:\r\n def __init__(self):\r\n self.nodes = OrderedDict() \r\n self.lastNode = None\r\n\r\n self.backward_edges = []\r\n self.cycles = []\r\n\r\n def __str__(self):\r\n ret = \"\"\r\n\r\n for node in self.nodes.values():\r\n ret += str(node) + \"\\n\"\r\n\r\n return ret\r\n\r\n \"\"\"Function that adds a new line/node to the graph\"\"\"\r\n def add_new_node(self, line):\r\n newNode = Node(line)\r\n\r\n # First run\r\n if len(self.nodes) == 0:\r\n self.nodes[newNode.id()] = newNode\r\n self.lastNode = newNode\r\n return\r\n\r\n if newNode.id() not in self.nodes:\r\n self.nodes[newNode.id()] = newNode\r\n \r\n assert(self.lastNode is not None)\r\n self.nodes[self.lastNode.id()].add_new_edge(newNode)\r\n\r\n self.post_process_node(newNode)\r\n \r\n self.lastNode = newNode\r\n\r\n def post_process_node(self, newNode):\r\n # We have seen this node before so we maybe have a cycle\r\n if newNode.id() < self.lastNode.id():\r\n self.backward_edges.append((newNode.id(), self.lastNode.id()))\r\n\r\n # Find cycles in a graph: if nextNodeIndex < currentNodeIndex\r\n def find_cycles(self):\r\n \r\n counted_edges = Counter(self.backward_edges)\r\n\r\n for (start, end), count in counted_edges.items():\r\n cycle = []\r\n #TODO: get a slice of the dict\r\n for node in self.nodes.values():\r\n if node.id() >= start and node.id() <= end:\r\n cycle.append(node)\r\n\r\n self.cycles.append(Cycle(cycle.copy(), count))\r\n\r\n # Sort cycles by length\r\n # rearranged_cycles = sorted(self.cycles, key=lambda cycle: cycle.size())\r\n rearranged_cycles = []\r\n\r\n for cycle in self.cycles:\r\n # See where it belongs\r\n added = False\r\n for r_cycle in rearranged_cycles:\r\n if r_cycle.contains(cycle):\r\n r_cycle.add_new_child(cycle)\r\n added = True\r\n break\r\n \r\n # We didn't add it to this cycle tree\r\n if not added:\r\n contained = []\r\n not_contained = []\r\n\r\n # partition based on\r\n for r_cycle in rearranged_cycles:\r\n if cycle.contains(r_cycle):\r\n contained.append(r_cycle)\r\n else:\r\n not_contained.append(r_cycle)\r\n\r\n # leave the ones not contained alone\r\n rearranged_cycles = not_contained\r\n\r\n # add the contained ones to the current one\r\n for child in contained:\r\n cycle.add_new_child(child)\r\n\r\n # put it in the tree\r\n rearranged_cycles.append(cycle)\r\n\r\n self.cycles = rearranged_cycles\r\n\r\nclass Node:\r\n def __init__(self, line):\r\n tokens = line.split(maxsplit=1)\r\n \r\n 
assert(len(tokens) == 2)\r\n \r\n self.address = tokens[0]\r\n self.instr = tokens[1]\r\n self.nextNodes = Counter()\r\n\r\n def __str__(self):\r\n ret = \"Node @\" + self.id() + \" '\" + self.instr + \"', \"\r\n ret += \"Next nodes: \" + str( list(map(lambda node : (node.id(), node.instr), self.nextNodes.keys() )))\r\n\r\n return ret\r\n\r\n def __eq__(self, other):\r\n return self.address == other.address\r\n\r\n def __hash__(self):\r\n return hash(self.address)\r\n\r\n def id(self):\r\n return self.address\r\n\r\n def add_new_edge(self, node):\r\n self.nextNodes.update({node : 1})\r\n\r\nclass Cycle:\r\n def __init__(self, cycle, iteration_count):\r\n self.nodes = cycle\r\n self.iteration_count = iteration_count\r\n self.inner_cycles = []\r\n\r\n self.start = int(self.nodes[0].address, 16)\r\n self.end = int(self.nodes[self.size() - 1].address, 16)\r\n\r\n def tabs(self, count):\r\n return \"\t\" * count\r\n\r\n # TODO: this function is a mess\r\n def __str__(self, indent = 0):\r\n ret = self.tabs(indent) + \"Cycle was run %d times:\n\" % (self.iteration_count)\r\n ret += self.tabs(indent) + \"Starts at %d and ends at %d\n\" % (self.start, self.end)\r\n\r\n # TODO: don't do this here\r\n sorted_cycles = sorted(self.inner_cycles, key=lambda cycle: cycle.start_address())\r\n\r\n if len(sorted_cycles) == 0:\r\n for node in self.nodes:\r\n ret += self.tabs(indent + 1) + \"%s\n\" % (node.instr)\r\n return ret\r\n\r\n current_cycle_index = 0\r\n current_cycle_address = sorted_cycles[0].start\r\n\r\n current_instr_address = int(self.nodes[0].address, 16)\r\n current_instr_index = 0\r\n\r\n while current_cycle_index < len(sorted_cycles):\r\n current_cycle_address = sorted_cycles[current_cycle_index].start\r\n\r\n while current_cycle_address > current_instr_address:\r\n ret += self.tabs(indent + 1) + \"%s\n\" % (self.nodes[current_instr_index].instr)\r\n \r\n current_instr_index = current_instr_index + 1\r\n current_instr_address = int(self.nodes[current_instr_index].address, 16)\r\n\r\n ret += sorted_cycles[current_cycle_index].__str__(indent + 1)\r\n\r\n while current_instr_address <= sorted_cycles[current_cycle_index].end:\r\n current_instr_index = current_instr_index + 1\r\n current_instr_address = int(self.nodes[current_instr_index].address, 16)\r\n\r\n current_cycle_index = current_cycle_index + 1\r\n\r\n while current_instr_index < self.size():\r\n ret += self.tabs(indent + 1) + \"%s\n\" % (self.nodes[current_instr_index].instr)\r\n current_instr_index = current_instr_index + 1\r\n\r\n return ret\r\n\r\n def size(self):\r\n return len(self.nodes)\r\n \r\n def start_address(self):\r\n return self.start\r\n\r\n def end_address(self):\r\n return self.end\r\n\r\n def contains(self, other):\r\n assert(len(other.nodes) > 1)\r\n return self.start_address() < other.start_address() and self.end_address() > other.end_address()\r\n\r\n def add_new_child(self, other):\r\n added = False\r\n for cycle in self.inner_cycles:\r\n if cycle.contains(other):\r\n cycle.add_new_child(other)\r\n added = True\r\n\r\n if not added:\r\n self.inner_cycles.append(other)", "sub_path": "Graph.py", "file_name": "Graph.py", "file_ext": "py", "file_size_in_byte": 6514, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "collections.OrderedDict", "line_number": 6, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 48, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 104, "usage_type": "call"}]} +{"seq_id": 
"495359794", "text": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n\tpath('', views.homepage),\n path('about/', views.about),\n path('artists/', views.artists_list),\n path('paintings/', views.paintings_list),\n path('page/', views.artists),\n path('artists//', views.artists_info),\n path('artists//paintings//', views.painting_details),\n path('artists//paintings//test/', views.painting_test)\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)", "sub_path": "paintings/main/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 698, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "234444539", "text": "import random\r\nimport sys\r\nimport pygame\r\n\r\n# Global vars\r\nFPS = 32\r\nSCREENWIDTH = 300\r\nSCREENHEIGHT = 600\r\nSCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))\r\nGROUND_Y = SCREENHEIGHT * 0.8\r\nGAME_SPRITES = {}\r\nGAME_SOUNDS = {}\r\nPLAYER = 'resources/bird.png'\r\nBACKGROUND = 'resources/background.png'\r\nPIPE = 'resources/pipe.png'\r\n\r\n\r\ndef welcome_screen():\r\n player_x = int(SCREENWIDTH/5)\r\n player_y = int((SCREENHEIGHT - GAME_SPRITES['player'].get_height())/2)\r\n message_x = int((SCREENWIDTH - GAME_SPRITES['message'].get_width())/2)\r\n message_y = int(SCREENHEIGHT*0.13)\r\n base_x = 0\r\n\r\n while True:\r\n for event in pygame.event.get():\r\n # Exit the game\r\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\r\n pygame.quit()\r\n sys.exit()\r\n\r\n # Start when player presses space\r\n elif event.type == pygame.KEYDOWN and (event.key == pygame.K_SPACE):\r\n return\r\n else:\r\n SCREEN.blit(GAME_SPRITES['background'], (0, 0))\r\n SCREEN.blit(GAME_SPRITES['player'], (player_x, player_y))\r\n SCREEN.blit(GAME_SPRITES['message'], (message_x, message_y))\r\n SCREEN.blit(GAME_SPRITES['base'], (base_x, GROUND_Y))\r\n pygame.display.update()\r\n FPS_CLOCK.tick(FPS)\r\n\r\n\r\ndef main_game():\r\n # Init the game\r\n score = 0\r\n player_x = int(SCREENWIDTH / 5)\r\n player_y = int(SCREENWIDTH / 2)\r\n basex = 0\r\n\r\n # Create some pipes\r\n new_pipe1 = get_random_pipe()\r\n new_pipe2 = get_random_pipe()\r\n\r\n # Upper 
pipes\r\n upper_pipes = [\r\n {'x': SCREENWIDTH + 200, 'y': new_pipe1[0]['y']},\r\n {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': new_pipe2[0]['y']},\r\n ]\r\n # Lower pipes\r\n lower_pipes = [\r\n {'x': SCREENWIDTH + 200, 'y': new_pipe1[1]['y']},\r\n {'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': new_pipe2[1]['y']},\r\n ]\r\n\r\n # Define speed/movement settings\r\n pipe_vel_x = -4\r\n\r\n player_vel_y = -9\r\n player_max_vel_y = 10\r\n player_acc_y = 1\r\n\r\n player_flap_acc_v = -8 # speed while flapping\r\n player_flapped = False\r\n\r\n while True:\r\n # Main loop\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN and (event.key == pygame.K_SPACE or event.key == pygame.K_UP):\r\n if player_y > 0:\r\n player_vel_y = player_flap_acc_v\r\n player_flapped = True\r\n\r\n # Check if player collides/crashes\r\n crash_test = is_collide(player_x, player_y, upper_pipes, lower_pipes)\r\n\r\n if crash_test:\r\n return\r\n\r\n # Check/update score\r\n player_mid_pos = player_x + GAME_SPRITES['player'].get_width() / 2\r\n for pipe in upper_pipes:\r\n pipe_mid_pos = pipe['x'] + GAME_SPRITES['pipe'][0].get_width() / 2\r\n if pipe_mid_pos <= player_mid_pos < pipe_mid_pos + 4:\r\n score += 1\r\n print(f\"Your score is {score}\")\r\n\r\n if player_vel_y < player_max_vel_y and not player_flapped:\r\n player_vel_y += player_acc_y\r\n\r\n if player_flapped:\r\n player_flapped = False\r\n player_height = GAME_SPRITES['player'].get_height()\r\n player_y = player_y + min(player_vel_y, GROUND_Y - player_y - player_height)\r\n\r\n # Move the pipes/playing field\r\n for upperPipe, lowerPipe in zip(upper_pipes, lower_pipes):\r\n upperPipe['x'] += pipe_vel_x\r\n lowerPipe['x'] += pipe_vel_x\r\n\r\n # Add a new pipe\r\n if 0 < upper_pipes[0]['x'] < 5:\r\n new_pipe = get_random_pipe()\r\n upper_pipes.append(new_pipe[0])\r\n lower_pipes.append(new_pipe[1])\r\n\r\n # Remove a pipe if it's off-screen\r\n if upper_pipes[0]['x'] < -GAME_SPRITES['pipe'][0].get_width():\r\n upper_pipes.pop(0)\r\n lower_pipes.pop(0)\r\n\r\n # Blit the background and pipe sprites\r\n SCREEN.blit(GAME_SPRITES['background'], (0, 0))\r\n for upperPipe, lowerPipe in zip(upper_pipes, lower_pipes):\r\n SCREEN.blit(GAME_SPRITES['pipe'][0], (upperPipe['x'], upperPipe['y']))\r\n SCREEN.blit(GAME_SPRITES['pipe'][1], (lowerPipe['x'], lowerPipe['y']))\r\n\r\n SCREEN.blit(GAME_SPRITES['base'], (basex, GROUND_Y))\r\n SCREEN.blit(GAME_SPRITES['player'], (player_x, player_y))\r\n\r\n pygame.display.update()\r\n FPS_CLOCK.tick(FPS)\r\n\r\n\r\ndef is_collide(player_x, player_y, upper_pipes, lower_pipes):\r\n if player_y > GROUND_Y - 25 or player_y < 0:\r\n GAME_SOUNDS['hit'].play()\r\n return True\r\n\r\n for pipe in upper_pipes:\r\n pipe_height = GAME_SPRITES['pipe'][0].get_height()\r\n if player_y < pipe_height + pipe['y'] and abs(player_x - pipe['x']) < GAME_SPRITES['pipe'][0].get_width():\r\n return True\r\n\r\n for pipe in lower_pipes:\r\n if (player_y + GAME_SPRITES['player'].get_height() > pipe['y']) and abs(player_x - pipe['x']) < \\\r\n GAME_SPRITES['pipe'][0].get_width():\r\n return True\r\n\r\n return False\r\n\r\n\r\ndef get_random_pipe():\r\n # Generate positions of two pipes(one bottom straight and one top rotated ) for blitting on the screen\r\n pipe_height = GAME_SPRITES['pipe'][0].get_height()\r\n offset = SCREENHEIGHT / 3\r\n y2 = offset + random.randrange(0, int(SCREENHEIGHT - 
GAME_SPRITES['base'].get_height() - 1.2 * offset))\r\n pipe_x = SCREENWIDTH + 10\r\n y1 = pipe_height - y2 + offset\r\n pipe = [\r\n {'x': pipe_x, 'y': -y1}, # upper Pipe\r\n {'x': pipe_x, 'y': y2} # lower Pipe\r\n ]\r\n return pipe\r\n\r\n\r\nif __name__ == '__main__':\r\n # Main\r\n pygame.init()\r\n FPS_CLOCK = pygame.time.Clock()\r\n pygame.display.set_caption('Flappy Bird')\r\n\r\n GAME_SPRITES['message'] = pygame.image.load('resources/title.png').convert_alpha()\r\n GAME_SPRITES['base'] = pygame.image.load('resources/base.png').convert_alpha()\r\n GAME_SPRITES['pipe'] = (pygame.transform.rotate(pygame.image.load(PIPE).convert_alpha(), 180),\r\n pygame.image.load(PIPE).convert_alpha())\r\n\r\n GAME_SPRITES['background'] = pygame.image.load(BACKGROUND).convert()\r\n GAME_SPRITES['player'] = pygame.image.load(PLAYER).convert_alpha()\r\n\r\n while True:\r\n welcome_screen()\r\n main_game()\r\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6590, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pygame.display.set_mode", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 80, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 134, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 134, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 160, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 172, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 173, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 174, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 176, "usage_type": 
"call"}, {"api_name": "pygame.image", "line_number": 176, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 177, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 177, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 178, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 178, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 178, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 178, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 181, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 181, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 182, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 182, "usage_type": "attribute"}]} +{"seq_id": "456524188", "text": "from sklearn.externals.six import StringIO\nimport pydotplus as pydot\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.ensemble import RandomForestClassifier\nimport xlrd\nfrom sklearn import tree\nfrom sklearn.neighbors import KNeighborsClassifier\nimport random\nimport pandas\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import LeaveOneOut\nfrom joblib import Parallel, delayed\nfrom sklearn.ensemble import ExtraTreesClassifier\n\ndef splitdata(X,Y,ratio,seed):\n '''This function is to split the data into train and test data randomly and preserve the pos/neg ratio'''\n n_samples = X.shape[0]\n y = Y.astype(int)\n y_bin = np.bincount(y)\n classes = np.nonzero(y_bin)[0]\n #fint the indices for each class\n indices = []\n for i in classes:\n indice = []\n for j in range(n_samples):\n if y[j] == i:\n indice.append(j)\n indices.append(indice)\n train_indices = []\n for i in indices:\n k = int(len(i)*ratio)\n train_indices += (random.Random(seed).sample(i,k=k))\n #find the unused indices\n s = np.bincount(train_indices,minlength=n_samples)\n mask = s==0\n test_indices = np.arange(n_samples)[mask]\n return train_indices,test_indices\ndef rf_dis(n_trees, X,Y,train_indices,test_indices,seed, mleaf, mf):\n clf = RandomForestClassifier(n_estimators=n_trees,\n random_state=seed, oob_score=True, n_jobs=-1,min_samples_leaf = mleaf, max_features =mf )\n clf = clf.fit(X[train_indices], Y[train_indices])\n #pred = clf.predict(X[test_indices])\n weight = clf.score(X[test_indices], Y[test_indices])\n #print(1 - clf.oob_score_)\n n_samples = X.shape[0]\n dis = np.zeros((n_samples,n_samples))\n for i in range(n_samples):\n dis[i][i] = 0\n res = clf.apply(X)\n for i in range(n_samples):\n for j in range(i+1,n_samples):\n a = np.ravel(res[i])\n b = np.ravel(res[j])\n score = a == b\n d = float(score.sum())/n_trees\n dis[i][j] =dis[j][i] = d\n X_features1 = np.transpose(dis)\n X_features2 = X_features1[train_indices]\n X_features3 = np.transpose(X_features2)\n return X_features3[train_indices],X_features3[test_indices],weight\ndef EXrf_dis(n_trees, X,Y,train_indices,test_indices,seed, mleaf, mf):\n clf = ExtraTreesClassifier(n_estimators=n_trees,\n random_state=seed, oob_score=True, n_jobs=-1,min_samples_leaf = mleaf, max_features =mf )\n clf = clf.fit(X[train_indices], Y[train_indices])\n #pred = clf.predict(X[test_indices])\n weight = clf.score(X[test_indices], Y[test_indices])\n #print(1 - clf.oob_score_)\n n_samples = 
X.shape[0]\n dis = np.zeros((n_samples,n_samples))\n for i in range(n_samples):\n dis[i][i] = 0\n res = clf.apply(X)\n for i in range(n_samples):\n for j in range(i+1,n_samples):\n a = np.ravel(res[i])\n b = np.ravel(res[j])\n score = a == b\n d = float(score.sum())/n_trees\n dis[i][j] =dis[j][i] = d\n X_features1 = np.transpose(dis)\n X_features2 = X_features1[train_indices]\n X_features3 = np.transpose(X_features2)\n return X_features3[train_indices],X_features3[test_indices],weight\ndef knn(n_neb, train_x, train_y, test_x, test_y):\n clf =KNeighborsClassifier(n_neighbors=n_neb, n_jobs=-1)\n clf.fit(train_x, train_y)\n test_error = clf.score(test_x, test_y)\n test_auc = clf.predict_proba(test_x)\n return test_error\n\ndef onn(test_x, train_y, test_y):\n n_s = test_x.shape[0]\n l = []\n for i in range(n_s):\n li = test_x[i]\n min = np.min(li)\n #find the positition of min\n p = li.tolist().index(min)\n l.append(train_y[p])\n s = accuracy_score(test_y, l)\n return s\ndef RF(n_trees, seed, train_x, train_y, test_x, test_y):\n clf = RandomForestClassifier(n_estimators=n_trees,\n random_state = seed, oob_score=True)\n clf = clf.fit(train_x,train_y)\n oob_error = 1 - clf.oob_score_\n test_error = clf.score(test_x,test_y)\n test_auc = clf.predict_proba(test_x)\n #filename = './tmp1/RF_%d_.pkl'%seed\n #_ = joblib.dump(clf, filename, compress=9)\n return test_error\n\ndef ExRF(n_trees, seed, train_x, train_y, test_x, test_y):\n clf = ExtraTreesClassifier(n_estimators=n_trees,\n random_state = seed, oob_score=True)\n clf = clf.fit(train_x,train_y)\n oob_error = 1 - clf.oob_score_\n test_error = clf.score(test_x,test_y)\n test_auc = clf.predict_proba(test_x)\n #filename = './tmp1/RF_%d_.pkl'%seed\n #_ = joblib.dump(clf, filename, compress=9)\n return test_error\n'''\nX = [[0,0.2,0.6,0.8],\n [0.2,0,0.65,0.7],\n [0.6,0.65,0,0.1],\n [0.8,0.7,0.1,0]]\nY = [0,0,1,1]\ntest_x = [[0.2,0.1,0.5,0.6],\n [0.6,0.5,0.4,0.1]]\ntest_x = np.array(test_x)\nY = np.array(Y)\nt_Y = Y.transpose()\ntest_y = [0,1]\ntest_y = np.array(test_y)\ntest_y = test_y.transpose()\nprint(onn(test_x,t_Y,test_y))\n'''\nurl = 'text_pr_1.csv'\ndataframe = pandas.read_csv(url, header=None)\narray = dataframe.values\nX = array\nY = pandas.read_csv('label_progression.csv', header=None)\nY = Y.values\nY = np.ravel(Y)\nprint(Y.shape)\n\nfor i in range(4):\n url = 'text_pr_' + str(i + 2) + '.csv'\n dataframe = pandas.read_csv(url, header=None)\n array = dataframe.values\n X1 = array\n print(X1.shape)\n X = np.concatenate((X, X1), axis=1)\n\nProgression1 = X[:, 0:1680]\nProgression2 = X[:, 1680:3360]\nProgression3 = X[:, 3360:5040]\nProgression4 = X[:, 5040:6720]\nProgression5 = X[:, 6720:6745]\nProgressionY = Y\nurl = 'text_lg_1.csv'\ndataframe = pandas.read_csv(url, header=None)\narray = dataframe.values\nX = array\nY = pandas.read_csv('label_lowGrade.csv', header=None)\nY = Y.values\nY = np.ravel(Y)\nprint(Y.shape)\n\nfor i in range(4):\n url = 'text_lg_' + str(i + 2) + '.csv'\n dataframe = pandas.read_csv(url, header=None)\n array = dataframe.values\n X1 = array\n print(X1.shape)\n X = np.concatenate((X, X1), axis=1)\n\nlowGrade1 = X[:, 0:1680]\nlowGrade2 = X[:, 1680:3360]\nlowGrade3 = X[:, 3360:5040]\nlowGrade4 = X[:, 5040:6720]\nlowGrade5 = X[:, 6720:6745]\nlowGradeY = Y\nurl = 'text_nonIDH1_1.csv'\ndataframe = pandas.read_csv(url, header=None)\narray = dataframe.values\nX = array\nY = pandas.read_csv('label_nonIDH1.csv', header=None)\nY = Y.values\nY = np.ravel(Y)\nprint(Y.shape)\n\nfor i in range(4):\n url = 'text_nonIDH1_' + str(i + 2) 
+ '.csv'\n dataframe = pandas.read_csv(url, header=None)\n array = dataframe.values\n X1 = array\n print(X1.shape)\n X = np.concatenate((X, X1), axis=1)\n\nnonIDH1 = X[:, 0:1680]\nnonIDH2 = X[:, 1680:3360]\nnonIDH3 = X[:, 3360:5040]\nnonIDH4 = X[:, 5040:6720]\nnonIDH5 = X[:, 6720:6745]\nnonIDHY = Y\nurl = 'text_id_1.csv'\ndataframe = pandas.read_csv(url, header=None)\narray = dataframe.values\nX = array\nY = pandas.read_csv('label_IDHCodel.csv', header=None)\nY = Y.values\nY = np.ravel(Y)\nprint(Y.shape)\n\nfor i in range(4):\n url = 'text_id_' + str(i + 2) + '.csv'\n dataframe = pandas.read_csv(url, header=None)\n array = dataframe.values\n X1 = array\n print(X1.shape)\n X = np.concatenate((X, X1), axis=1)\n\nIDHCodel1 = X[:, 0:1680]\nIDHCodel2 = X[:, 1680:3360]\nIDHCodel3 = X[:, 3360:5040]\nIDHCodel4 = X[:, 5040:6720]\nIDHCodel5 = X[:, 6720:6745]\nIDHCodelY = Y\nurl = './MyData.csv'\ndataframe = pandas.read_csv(url)#, header=None)\narray = dataframe.values\nX = array[:,1:]\nY = pandas.read_csv('./MyDatalabel.csv')\nY = Y.values\nY = Y[:,1:]\nY = Y.transpose()\nY = np.ravel(Y)\nn_samples = X.shape[0]\nn_features = X.shape[1]\nfor i in range(len(Y)):\n if Y[i] == 4:\n Y[i]=1\n\nMetabo1 = X[:, 0:2]\nMetabo2 = X[:, 2:21]\nMetabo3 = X[:, 21:]\nMetaboY = Y\n'''\npath = './LSVT.xlsx'\npath1 = './LSVTlabel.xlsx'\n\nworkbook = xlrd.open_workbook(path)\nworkbook1 = xlrd.open_workbook(path1)\nworksheet = workbook.sheet_by_index(0)\nworksheet1 = workbook1.sheet_by_index(0)\n# Change this depending on how many header rows are present\n# Set to 0 if you want to include the header data.\noffset = 0\n\ndata = []\nfor i, row in enumerate(range(worksheet.nrows)):\n if i <= offset: # (Optionally) skip headers\n continue\n r = []\n for j, col in enumerate(range(worksheet.ncols)):\n r.append(worksheet.cell_value(i, j))\n data.append(r)\n\nlabel = []\nfor i, row in enumerate(range(worksheet1.nrows)):\n if i <= offset: # (Optionally) skip headers\n continue\n r = []\n for j, col in enumerate(range(worksheet1.ncols)):\n r.append(worksheet1.cell_value(i, j))\n label += r\nX = np.array(data)\nY = np.array(label)\n\nLSVT1 = X[:,0:51]\nLSVT2 =X[:, 51:83]\nLSVT3 = X[:, 124:]\nLSVT4 = X[:, 83:124]\nLSVTY = Y\nurl = 'Cal7_1.csv'\ndataframe = pandas.read_csv(url) # , header=None)\narray = dataframe.values\nX = array[:, 1:]\n\nfor i in range(5):\n url = 'Cal7_' + str(i + 2) + '.csv'\n dataframe = pandas.read_csv(url) # , header=None)\n array = dataframe.values\n X1 = array[:, 1:]\n X = np.concatenate((X, X1), axis=1)\nY = pandas.read_csv('Cal7_label.csv')\nY = Y.values\n\nY = Y[:, 1:]\n# Y = Y.transpose()\nY = np.ravel(Y)\nCal71 = X[:, 0:48]\nCal72 = X[:, 48:88]\nCal73 = X[:, 88:342]\nCal74 = X[:, 342:2326]\nCal75 = X[:, 2326:2838]\nCal76 = X[:, 2838:]\nCal7Y = Y\n'''\ndef mcode(ite):\n if ite ==0:\n file = \"nonIDH1\"\n dataX = nonIDH1\n dataY = nonIDHY\n if ite == 1:\n file = \"nonIDH5\"\n dataX = nonIDH5\n dataY = nonIDHY\n if ite ==2:\n file = \"IDHCodel2\"\n dataX = IDHCodel2\n dataY = IDHCodelY\n if ite == 3:\n file = \"IDHCodel4\"\n dataX = IDHCodel4\n dataY = IDHCodelY\n if ite ==4:\n file = \"lowGrade3\"\n dataX = lowGrade3\n dataY = lowGradeY\n if ite == 5:\n file = \"lowGrade4\"\n dataX = lowGrade4\n dataY = lowGradeY\n if ite ==6:\n file = \"Progression1\"\n dataX = Progression1\n dataY = ProgressionY\n if ite == 7:\n file = \"Progression5\"\n dataX = Progression5\n dataY = ProgressionY\n if ite ==8:\n file = \"Metabo2\"\n dataX = Metabo2\n dataY = MetaboY\n if ite == 9:\n file = \"Metabo3\"\n dataX = 
Metabo3\n dataY = MetaboY\n perfonn = [[[0 for k in range(4)] for j in range(5)] for i in range(2)]\n perfrf = [[[0 for k in range(4)] for j in range(5)] for i in range(2)]\n perfrfdis = [[[0 for k in range(4)] for j in range(5)] for i in range(2)]\n\n for i in range(2):\n if i == 0:\n mtry = 1\n else:\n mtry = \"auto\"\n for j in range(5):\n if j == 0:\n ntrees = 50\n if j == 1:\n ntrees = 100\n if j == 2:\n ntrees = 500\n if j == 3:\n ntrees = 800\n if j == 4:\n ntrees = 1000\n for k in range(4):\n if k == 0:\n minleaf = 0.5\n if k == 1:\n minleaf = 0.3\n if k == 2:\n minleaf = 0.1\n if k == 3:\n minleaf = 1\n\n err = []\n errr = []\n eknn = []\n erf = []\n se = 1000\n for ite in range(10):\n se = se+1\n train_in, test_in = splitdata(dataX,dataY,0.5,se)\n\n X_features_train1, X_features_test1, w1 = rf_dis(n_trees=ntrees, X=dataX, Y=dataY, train_indices=train_in,\n test_indices=test_in, seed=se,mleaf = minleaf,mf = mtry)\n e = onn( train_y=dataY[train_in],test_x=X_features_test1,test_y=dataY[test_in])\n #e2 = knn(n_neb=1, train_x=X_features_train1, train_y=Y[train_in], test_x=X_features_test1, test_y=Y[test_in])\n e3 = RF(n_trees=500,seed=se,train_x=X_features_train1, train_y=dataY[train_in], test_x=X_features_test1, test_y=dataY[test_in])\n #print(i,se,ntrees)\n e1 = w1\n err.append(e)\n errr.append(e1)\n #eknn.append(e2)\n erf.append(e3)\n #print(len(err),len(errr),len(eknn),len(erf))\n #err1.append(np.mean(err)) #performance of onn\n perfonn[i][j][k]= np.mean(err)\n #err2.append(np.mean(errr))#performance of RF\n perfrf[i][j][k] = np.mean(errr)\n #err3.append(np.mean(eknn))\n #err4.append(np.mean(erf))#performance of RF dis\n perfrfdis[i][j][k] = np.mean(erf)\n\n\n #prediction = pandas.DataFrame(data).to_csv(file)\n filename1 = file + \"onnRF\"\n filename2 = file + \"rfRF\"\n filename3 = file + \"rfdisRF\"\n np.save(filename1, perfonn)\n np.save(filename2, perfrf)\n np.save(filename3, perfrfdis)\n\nif __name__ == '__main__':\n Parallel(n_jobs=10)(delayed(mcode)(ite=i) for i in range(10))\n'''\npl.xlabel('tree number')\n\npl.plot(tr, err1)# use pylab to plot x and y\npl.plot(tr, err2)# use pylab to plot x and y\npl.plot(tr, err3)# use pylab to plot x and y\npl.plot(tr, err4)# use pylab to plot x and y\npl.legend(['1nn', 'RF', \"Dis1NN\"], loc='upper left')\npl.show()# show the plot on the screen'''\n\n\n'''\nclf = RandomForestClassifier(n_estimators=500,\n random_state=1000, oob_score=True, n_jobs=-1)\nclf.fit(X, Y)\ntrees = clf.estimators_\nld = [estimator.tree_.max_depth for estimator in clf.estimators_]\nprint(np.max(ld))\ndot_data = StringIO()\ntree.export_graphviz(trees[3], out_file=dot_data,\n\nfilled=True, rounded=True,proportion =True,\nspecial_characters=True)\ngraph = pydot.graph_from_dot_data(dot_data.getvalue())\ngraph.write_pdf('BBC2.pdf')\n'''", "sub_path": "progression/AllProgression.py", "file_name": "AllProgression.py", "file_ext": "py", "file_size_in_byte": 13818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.bincount", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 21, "usage_type": "call"}, {"api_name": "random.Random", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 47, 
"usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.ensemble.ExtraTreesClassifier", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 101, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.ensemble.ExtraTreesClassifier", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 141, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 146, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 155, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 164, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 169, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 178, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 187, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 192, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 201, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 210, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 215, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 224, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 233, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 406, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 408, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 411, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 420, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 423, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 423, "usage_type": "call"}]} +{"seq_id": "311366784", "text": "from django.shortcuts 
import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.utils import timezone\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.urls import reverse, reverse_lazy\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nimport django.views.generic as g\nfrom django.db.models import Count,Q\nfrom .models import Question, Option, Choice, Tag\nfrom apps.home.models import Profile\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .forms import OptionFormSet, QuestionForm, VoteForm\nfrom django.db import transaction\nfrom functools import reduce\nimport datetime\nfrom django.core.exceptions import ValidationError\n# Create your views here.\n\nclass HomeView(g.TemplateView):\n template_name=\"polls/index.html\"\n\n def get_context_data(self, **kwargs):\n context=super(HomeView,self).get_context_data(**kwargs)\n now=timezone.now()\n cutoff=timezone.now()-datetime.timedelta(days=7)\n questions = context['recents']=Question.objects.filter(\n pub_date__gte=cutoff,\n pub_date__lte=now,\n )\n\n if 'search' in self.request.GET:\n search_term = self.request.GET['search']\n questions=questions.filter(text__icontains=search_term)\n if 'tags' in self.request.GET:\n tag_name = self.request.GET['tags']\n tag = Tag.objects.get(text=tag_name)\n questions=questions.filter(tags__id=tag.id)\n context['recents'] = questions.order_by('-id')[:5]\n context['tags']=Tag.objects.all()\n return context\n\n def get(self,request,*args,**kwargs):\n return super(HomeView,self).get(request,args,kwargs)\n\n def post(self,request,*args,**kwargs):\n print('posting')\n return super(HomeView,self).post(request,args,kwargs)\n\n\nclass ProfileView(g.DetailView):\n login_url=reverse_lazy(\"login\")\n template_name=\"polls/profile.html\"\n\n def get_context_data(self,**kwargs):\n now=timezone.now()\n cutoff=timezone.now()-datetime.timedelta(days=7)\n context=super(ProfileView,self).get_context_data(**kwargs)\n context['recents'] = Question.objects.filter(\n creator=context['profile'].user.id,\n pub_date__gte=cutoff,\n pub_date__lte=now\n ).order_by('-id')[:5]\n return context\n\n def get_object(self):\n if 'id' in self.kwargs:\n profile = get_object_or_404(Profile,user__id=self.kwargs['id'])\n return profile\n else:\n return self.request.user.profile\n\nclass ListView(g.ListView):\n template_name=\"polls/list.html\"\n\n def get_queryset(self):\n today = datetime.datetime.now()\n if 'user_id' in self.kwargs:\n queryset = Question.objects.filter(creator=self.request.user)\n if self.request.user.id==self.kwargs['user_id']:\n queryset = queryset.filter(\n creator=self.request.user,\n public=True,\n pub_date__lte=today\n )\n else:\n queryset = Question.objects.filter(\n public=True,\n pub_date__lte=today\n )\n if 'search' in self.request.GET:\n print(\"Got a search term\")\n search_term = self.request.GET['search']\n queryset=queryset.filter(text__icontains=search_term)\n if 'tags' in self.request.GET:\n tag_name = self.request.GET['tags']\n tag = Tag.objects.get(text=tag_name)\n queryset=queryset.filter(tags__id=tag.id)\n\n return queryset.order_by('-id')\n\n\n def get_context_data(self, **kwargs):\n context=super(ListView,self).get_context_data(**kwargs)\n context['tags']=Tag.objects.all()\n # if 'user_id' in kwargs:\n # context['user_list'] = True\n # for question in context['question_list']:\n # choices = Choice.objects.filter(question=question)\n # question.votes=choices.count()\n return context\n\nclass 
CreateView(LoginRequiredMixin,g.CreateView):\n form_class=QuestionForm\n login_url=reverse_lazy(\"login\")\n template_name=\"polls/create.html\"\n model=Question\n context_object_name=\"questions\"\n\n def get(self,request,*args,**kwargs):\n self.object=None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n option_form = OptionFormSet()\n return self.render_to_response(self.get_context_data(\n form=form,option_form=option_form\n ))\n\n def post(self,request,*args,**kwargs):\n self.object=None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n option_form = OptionFormSet(self.request.POST, self.request.FILES)\n if form.is_valid() and option_form.is_valid():\n return self.form_valid(form,option_form)\n else:\n return self.form_invalid(form,option_form)\n\n\n\n def form_invalid(self, form, option_form):\n return self.render_to_response(\n self.get_context_data(form=form,option_form=option_form))\n\n def form_valid(self,form,option_form,*args,**kwargs):\n question = form.save(commit=False)\n question.creator=self.request.user\n with transaction.atomic():\n self.object=form.save()\n option_form.instance=self.object\n option_form.save()\n return HttpResponseRedirect(reverse_lazy(\"polls:vote\", args=[question.id]))\n\nclass VoteFormView(g.CreateView):\n template_name=\"polls/vote_form.html\"\n form_class=VoteForm\n model=Choice\n\n def get_initial(self):\n initial=super(VoteFormView,self).get_initial()\n question = get_object_or_404(Question,id=self.kwargs['question_id'])\n initial['question']=question\n return initial\n\n def post(self,request,*args,**kwargs):\n return super(VoteFormView,self).post(request,*args,**kwargs)\n\n def get_context_data(self,**kwargs):\n context = super(VoteFormView,self).get_context_data(**kwargs)\n kwargs = context['view'].kwargs\n question = get_object_or_404(Question,id=kwargs['question_id'])\n profile = question.creator.profile\n context['question'] = question\n context['profile'] = profile\n return context\n\n def form_invalid(self,form):\n return super(VoteFormView,self).form_invalid(form)\n\n def form_valid(self,form):\n form.save()\n return HttpResponseRedirect(reverse_lazy(\"polls:result\", kwargs={'question_id':self.request.POST['question']}))\n\nclass VoteView(g.CreateView):\n template_name=\"polls/vote.html\"\n form_class=VoteForm\n model=Choice\n\n def get_initial(self):\n initial=super(VoteView,self).get_initial()\n question = get_object_or_404(Question,id=self.kwargs['question_id'])\n initial['question']=question\n return initial\n\n def post(self,request,*args,**kwargs):\n return super(VoteView,self).post(request,*args,**kwargs)\n\n def get_context_data(self,**kwargs):\n context = super(VoteView,self).get_context_data(**kwargs)\n kwargs = context['view'].kwargs\n question = get_object_or_404(Question,id=kwargs['question_id'])\n profile = question.creator.profile\n context['question'] = question\n context['profile'] = profile\n return context\n\n def form_invalid(self,form):\n return super(VoteView,self).form_invalid(form)\n\n def form_valid(self,form):\n form.save()\n return HttpResponseRedirect(reverse_lazy(\"polls:result\", kwargs={'question_id':self.request.POST['question']}))\n\nclass ResultsView(g.TemplateView):\n template_name=\"polls/results.html\"\n def get_context_data(self, **kwargs):\n options = Option.objects.filter(question__id=kwargs['question_id'])\n question = get_object_or_404(Question,id=kwargs['question_id'])\n results = []\n count = 
Choice.objects.filter(option__in=options).count()\n for option in options:\n votes = Choice.objects.filter(option=option).count()\n if count > 0:\n result={'text':option.text, 'image':option.image, 'votes':round(100*float(votes)/float(count))}\n else:\n result={'text':option.text, 'image':option.image, 'votes':0}\n results.append(result)\n context=super(ResultsView,self).get_context_data(**kwargs)\n context['results']=results\n context['question']=question\n context['profile']=question.creator.profile\n context['count']=count\n return context\n", "sub_path": "apps/polls/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8669, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.views.generic.TemplateView", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 20, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 25, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 25, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 26, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Question.objects.filter", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Question.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Question", "line_number": 27, "usage_type": "name"}, {"api_name": "models.Tag.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Tag.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.Tag", "line_number": 37, "usage_type": "name"}, {"api_name": "models.Tag.objects.all", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Tag.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Tag", "line_number": 40, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 51, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 52, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 56, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 56, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 57, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 57, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Question.objects.filter", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Question.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.Question", "line_number": 59, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 68, "usage_type": "call"}, {"api_name": "apps.home.models.Profile", "line_number": 68, "usage_type": "argument"}, {"api_name": "django.views.generic.ListView", "line_number": 73, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 73, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "models.Question.objects.filter", "line_number": 79, 
"usage_type": "call"}, {"api_name": "models.Question.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.Question", "line_number": 79, "usage_type": "name"}, {"api_name": "models.Question.objects.filter", "line_number": 87, "usage_type": "call"}, {"api_name": "models.Question.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "models.Question", "line_number": 87, "usage_type": "name"}, {"api_name": "models.Tag.objects.get", "line_number": 97, "usage_type": "call"}, {"api_name": "models.Tag.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "models.Tag", "line_number": 97, "usage_type": "name"}, {"api_name": "models.Tag.objects.all", "line_number": 105, "usage_type": "call"}, {"api_name": "models.Tag.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "models.Tag", "line_number": 105, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 113, "usage_type": "name"}, {"api_name": "django.views.generic.CreateView", "line_number": 113, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 113, "usage_type": "name"}, {"api_name": "forms.QuestionForm", "line_number": 114, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 115, "usage_type": "call"}, {"api_name": "models.Question", "line_number": 117, "usage_type": "name"}, {"api_name": "forms.OptionFormSet", "line_number": 124, "usage_type": "call"}, {"api_name": "forms.OptionFormSet", "line_number": 133, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 148, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 148, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 152, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 152, "usage_type": "call"}, {"api_name": "django.views.generic.CreateView", "line_number": 154, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 154, "usage_type": "name"}, {"api_name": "forms.VoteForm", "line_number": 156, "usage_type": "name"}, {"api_name": "models.Choice", "line_number": 157, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 161, "usage_type": "call"}, {"api_name": "models.Question", "line_number": 161, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 171, "usage_type": "call"}, {"api_name": "models.Question", "line_number": 171, "usage_type": "argument"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 182, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 182, "usage_type": "call"}, {"api_name": "django.views.generic.CreateView", "line_number": 184, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 184, "usage_type": "name"}, {"api_name": "forms.VoteForm", "line_number": 186, "usage_type": "name"}, {"api_name": "models.Choice", "line_number": 187, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 191, "usage_type": "call"}, {"api_name": "models.Question", "line_number": 191, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 201, "usage_type": "call"}, {"api_name": "models.Question", "line_number": 201, "usage_type": "argument"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 212, "usage_type": "call"}, {"api_name": 
"django.urls.reverse_lazy", "line_number": 212, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 214, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 214, "usage_type": "name"}, {"api_name": "models.Option.objects.filter", "line_number": 217, "usage_type": "call"}, {"api_name": "models.Option.objects", "line_number": 217, "usage_type": "attribute"}, {"api_name": "models.Option", "line_number": 217, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 218, "usage_type": "call"}, {"api_name": "models.Question", "line_number": 218, "usage_type": "argument"}, {"api_name": "models.Choice.objects.filter", "line_number": 220, "usage_type": "call"}, {"api_name": "models.Choice.objects", "line_number": 220, "usage_type": "attribute"}, {"api_name": "models.Choice", "line_number": 220, "usage_type": "name"}, {"api_name": "models.Choice.objects.filter", "line_number": 222, "usage_type": "call"}, {"api_name": "models.Choice.objects", "line_number": 222, "usage_type": "attribute"}, {"api_name": "models.Choice", "line_number": 222, "usage_type": "name"}]} +{"seq_id": "485047342", "text": "import collections\n\nclass Node:\n def __init__(self, root):\n self.left = None\n self.right = None\n self.val = root\n\ndef levelOrder(self, root):\n if root is None:\n return []\n\n result = []\n q = collections.deque([root])\n while len(q) != 0:\n numnodes = len(q)\n temp = []\n for _ in range(numnodes):\n node = q.popleft()\n temp.append(node.val)\n if node.left is not None:\n q.append(node.left)\n if node.right is not None:\n q.append(node.right)\n result.append(temp)\n return result\n\n\nroot = Node(1)\nroot.left = Node(2)\nroot.right = Node(3)\nroot.left.left = Node(4)\nroot.left.right = Node(5)\n#\"Preorder traversal of binary tree is\"\n\nlevelOrder(root)\n\n#Inorder traversal of binary tree is\"\n#printInorder(root)\n\n#\"Postorder traversal of binary tree is\"\n#printPostorder(root)", "sub_path": "recursion/test1.py", "file_name": "test1.py", "file_ext": "py", "file_size_in_byte": 907, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "collections.deque", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "610690020", "text": "'''\r\nHere I implement a LinkedList to separate chaining hash table\r\n'''\r\nclass Empty(Exception):\r\n pass\r\nfrom collections import MutableMapping\r\n\r\nclass MapBase(MutableMapping):\r\n\r\n class _Item:\r\n\r\n __slots__='_key','_value'\r\n\r\n def __init__(self,k,v):\r\n self._key=k\r\n self._value=v\r\n\r\n def __eq__(self, other):\r\n return type(self) is type(other) and self._key is other._key\r\n\r\n def __ne__(self, other):\r\n return not self==other\r\n\r\n def __lt__(self, other):\r\n return self._key < other._key\r\n\r\n\r\nclass SinglyLinkedList:\r\n\r\n class _Node:\r\n __slots__='_element','_next'\r\n def __init__(self,element,next):\r\n self._element=element\r\n self._next=next\r\n\r\n def __init__(self):\r\n self._head=self._Node(None,None)\r\n self._size = 0\r\n\r\n def __len__(self):\r\n return self._size\r\n def is_empty(self):\r\n return self._size == 0\r\n\r\n # BigO cost: O(n)\r\n def _locate(self,e):\r\n cursor=self._head\r\n while cursor._next is not None:\r\n if cursor._next._element==e:\r\n return (cursor,cursor._next)\r\n cursor = cursor._next\r\n raise ValueError(\"No such a value been found!\")\r\n # BigO cost: O(1)\r\n def _add(self,e):\r\n new=self._Node(e,None)\r\n 
if self.is_empty():\r\n self._head._next=new\r\n else:\r\n new._next=self._head._next\r\n self._head._next=new\r\n self._size += 1\r\n\r\n # BigO cost: O(n)\r\n def _change(self,e,ne):\r\n try:\r\n pre,bid=self._locate(e)\r\n bid._element=ne\r\n except ValueError:\r\n print(\"No such value!\")\r\n\r\n def _del(self,e):\r\n try:\r\n pre,bid=self._locate(e)\r\n pre._next=bid._next\r\n bid=None\r\n self._size -= 1\r\n except ValueError:\r\n print(\"No such value!\")\r\n\r\n def __iter__(self):\r\n cursor=self._head._next\r\n if self.is_empty():\r\n raise Empty(\"The list is empty!\")\r\n while cursor is not None:\r\n yield cursor._element\r\n cursor = cursor._next\r\n\r\nfrom random import randrange\r\nclass ChainHashTable(MapBase):\r\n '''\r\n use _Item to store key and value pairs\r\n implement SinglyLinkedList into separate chaining\r\n '''\r\n\r\n\r\n def __init__(self,cap=11,p=109345121):\r\n self._table=[None]*cap\r\n self._scale=1+randrange(p-1)\r\n self._shift=randrange(p)\r\n self._size = 0\r\n self._prime=p\r\n\r\n def _hash_function(self,k):\r\n return (self._scale*hash(k)+self._shift) % self._prime % len(self._table)\r\n\r\n def __len__(self):\r\n return self._size\r\n\r\n\r\n def __setitem__(self, k, v):\r\n if self._size>len(self._table)//2:\r\n self._rehashing(len(self._table)*2-1)\r\n loc=self._hash_function(k)\r\n if self._table[loc] is None:\r\n self._table[loc] = SinglyLinkedList()\r\n if self._table[loc].is_empty():\r\n self._table[loc]._add(self._Item(k,v))\r\n self._size += 1\r\n else:\r\n found=False\r\n for item in self._table[loc]:\r\n if item._key == k:\r\n item._value=v\r\n found=True\r\n if not found:\r\n self._table[loc]._add(self._Item(k,v))\r\n self._size += 1\r\n\r\n\r\n\r\n def __getitem__(self, k):\r\n loc=self._hash_function(k)\r\n if self._table[loc] is None:\r\n raise KeyError(\"Key Error: \"+ repr(k))\r\n else:\r\n for item in self._table[loc]:\r\n if item._key==k:\r\n return item._value\r\n raise KeyError(\"Key Error: \"+ repr(k))\r\n\r\n def __delitem__(self, k):\r\n loc=self._hash_function(k)\r\n if self._table[loc] is None:\r\n raise KeyError(\"Key Error: \"+ repr(k))\r\n\r\n else:\r\n for item in self._table[loc]:\r\n if item._key==k:\r\n self._table[loc]._del(item)\r\n self._size -= 1\r\n return\r\n raise KeyError(\"Key Error: \"+ repr(k))\r\n\r\n def __iter__(self):\r\n if self._size > 0:\r\n for bucket in self._table:\r\n if bucket is not None and not bucket.is_empty():\r\n for item in bucket:\r\n yield item._key\r\n\r\n def _rehashing(self,c):\r\n old=list(self.items())## this attribute comes from ABC (MutableMapping)\r\n self._size = 0\r\n self._table=[None]*c\r\n for (k,v) in old:\r\n self.__setitem__(k,v) ## or self[k]=v\r\n\r\nif __name__=='__main__':\r\n a=ChainHashTable()\r\n d={i:chr(i) for i in range(100)}\r\n for i in range(100):\r\n a[i]=d[i]\r\n len(a)\r\n list(a)\r\n for i in range(100):\r\n del a[i]\r\n len(a)\r\n list(a)\r\n", "sub_path": "hash_table.py", "file_name": "hash_table.py", "file_ext": "py", "file_size_in_byte": 4890, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "collections.MutableMapping", "line_number": 8, "usage_type": "name"}, {"api_name": "random.randrange", "line_number": 98, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "299763961", "text": "#!/usr/bin/python3\nimport urllib.request\nimport regex\nimport sys\nimport os\n\n\ndef main(argv):\n\n    # Load the list of typos from the English Wikipedia\n    webUrl = 
urllib.request.urlopen(\"\\\nhttps://en.wikipedia.org/w/index.php?title=Wikipedia:AutoWikiBrowser/\\\nTypos&action=raw\")\n\n print(\"result code: \", str(webUrl.getcode()))\n print(\"charset: \", webUrl.headers.get_param(\"charset\"))\n print()\n\n htmlText = webUrl.read().decode('utf-8')\n # print(text)\n\n typoRegex = \"\\\n<(?:Typo)?\\\\s+(?:word=\\\"(.*?)\\\"\\\\s+)?find=\\\"(.*?)\\\"\\\\s+replace=\\\"(.*?)\\\n\\\"\\\\s*/?>\"\n\n rules = regex.findall(typoRegex, htmlText)\n\n excludeDirs = set([\".git\"])\n excludeFiles = ('.eps', '.gif', '.png', '.jpg', '.svg')\n\n filelist = list()\n if os.path.exists(argv[1]):\n if os.path.isfile(argv[1]):\n if not os.path.islink(argv[1]):\n filelist.append(argv[1])\n elif os.path.isdir(argv[1]):\n for root, dirs, files in os.walk(argv[1], topdown=True):\n dirs[:] = [d for d in dirs if d not in excludeDirs]\n for file in files:\n if not file.endswith(excludeFiles):\n filelist.append(os.path.join(root, file))\n filelist.sort()\n\n print(\"Following files will be searched for typos:\")\n for file in filelist:\n print(file)\n print()\n\n typoCount = 0\n\n for file in filelist:\n print(\"Examining file {}\".format(file))\n\n fh = open(file, 'r')\n try:\n stext = fh.read()\n except UnicodeDecodeError as e:\n print(\"Read error: {}\".format(e))\n print()\n fh.close()\n continue\n else:\n fh.close()\n for rule in rules:\n rule_name = rule[0]\n rule_regex = rule[1]\n rule_subst = rule[2]\n for index, line in enumerate(stext.splitlines()):\n matchobj = regex.search(rule[1], line)\n if matchobj:\n typoCount = typoCount + 1\n\n # construct fixed line\n fixed = rule_subst\n for i in range(1, rule_subst.count(\"$\") + 1):\n if(matchobj.group(i) != None):\n fixed = fixed.replace(\"${}\".format(i),\n matchobj.group(i))\n else:\n fixed = fixed.replace(\"${}\".format(i), \"\")\n\n print(\"{}:{}\".format(file, index + 1))\n print(\"-\", line, sep=\"\")\n print(\"+\", line.replace(matchobj.group(), fixed),\n sep=\"\")\n print(\" Match:\", matchobj.group())\n print(\" Replacement:\", fixed)\n print(\" Rule:\", rule_name)\n print(\" Regex:\", rule_regex)\n print(\"Substitution:\", rule_subst)\n print()\n\n print(\"typoCount = {}\".format(typoCount))\n\nif __name__ == \"__main__\":\n main(sys.argv[0:])\n", "sub_path": "nitpicker.py", "file_name": "nitpicker.py", "file_ext": "py", "file_size_in_byte": 3179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 11, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 11, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 11, "usage_type": "name"}, {"api_name": "regex.findall", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.islink", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, 
"usage_type": "attribute"}, {"api_name": "regex.search", "line_number": 69, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 96, "usage_type": "attribute"}]} +{"seq_id": "201302187", "text": "import sys, time, os, pygame, random, math\r\n\r\nfrom pygame.locals import *\r\nfrom global_vars import *\r\nfrom functions import *\r\n\r\npygame.init()\r\npygame.mixer.init()\r\n\r\nscreen = pygame.Surface(screensize)\r\n\r\npygame.display.set_caption('Runner!')\r\nicon = pygame.image.load(os.path.join(images_dir, 'icon.png'))\r\npygame.display.set_icon(icon)\r\n\r\npygame.mouse.set_visible(False)\r\n\r\nclock = pygame.time.Clock()\r\nfps = 60\r\n\r\nFunc.playMusic('theme')\r\n\r\ndef main():\r\n\tmenu_index = 1\r\n\tButton.button_count = 0\r\n\r\n\tmessages.empty()\r\n\tbuttons.empty()\r\n\ttypes.empty()\r\n\tsliders.empty()\r\n\r\n\tselected = False\r\n\tMsg('Runner', colours['black'], 90, (screen_width/2, 5))\r\n\tMsg('Press {} to select'.format(pygame.key.name(Globals.keys['Select']).upper()), colours['white'], 25, (screen_width-200, screen_height-50))\r\n\tButton('Begin', colours['blue'], 50, (screen_width/2, 204), 1, start)\r\n\tButton('Instructions', colours['blue'], 50, (screen_width/2, 284), 2, instr)\r\n\tButton('Options', colours['blue'], 50, (screen_width/2, 364), 3, options)\r\n\tButton('Quit', colours['blue'], 50, (screen_width/2, 444), 4, Func.quit)\r\n\r\n\twhile True:\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == QUIT or event.type == KEYDOWN and event.key == Globals.keys['Esc']:\r\n\t\t\t\tFunc.quit()\r\n\t\t\telif event.type == KEYDOWN:\r\n\t\t\t\tif event.key == Globals.keys['Down']:\r\n\t\t\t\t\tmenu_index += 1\r\n\t\t\t\t\tif menu_index > Button.button_count:\r\n\t\t\t\t\t\tmenu_index = 1\r\n\t\t\t\telif event.key == Globals.keys['Up']:\r\n\t\t\t\t\tmenu_index -= 1\r\n\t\t\t\t\tif menu_index <= 0:\r\n\t\t\t\t\t\tmenu_index = Button.button_count\r\n\t\t\t\telif event.key == Globals.keys['Select']:\r\n\t\t\t\t\tselected = True\r\n\t\t\t\telif event.key == K_k:\r\n\t\t\t\t\tFunc.playSound('dalekShout')\r\n\t\t\t\telif event.key == K_x:\r\n\t\t\t\t\tFunc.playSound('dalekAttack')\r\n\r\n\t\tmessages.update()\r\n\t\tbuttons.update(menu_index, selected)\r\n\r\n\t\tFunc.screenUpdate(screen, colours['red'])\r\n\t\tselected = False\r\n\r\ndef start():\r\n\tFunc.playMusic(intros[random.randrange(0,len(intros))], 1)\r\n\twhile True:\r\n\t\tif not pygame.mixer.music.get_busy():\r\n\t\t\tFunc.playMusic('I Am the Doctor')\r\n\t\t\tbreak\r\n\t\tclock.tick(fps)\r\n\r\n\tuser = Player(200, screen_height/2)\r\n\r\n\tTIMEREVENT = 25\r\n\tDMGEVENT = USEREVENT + 5\r\n\r\n\tmessages.empty()\r\n\tbuttons.empty()\r\n\ttypes.empty()\r\n\tsliders.empty()\r\n\r\n\tcount = 3\r\n\r\n\tscreen.fill(colours['white'])\r\n\r\n\tpygame.time.set_timer(DMGEVENT, 500)\r\n\r\n\tpygame.key.set_repeat(1,10)\r\n\r\n\tspike = Spike(100, 500, 600)\r\n\r\n\twhile True:\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tkey = pygame.key.get_pressed()\r\n\r\n\t\t\tif event.type == QUIT or event.type == KEYDOWN and event.key == Globals.keys['Esc']:\r\n\t\t\t\tFunc.quit()\r\n\r\n\t\t\telif event.type == KEYDOWN:\r\n\t\t\t\tif key[Globals.keys['Up']]:\r\n\t\t\t\t\tuser.image.fill(colours['blue'])\r\n\r\n\t\t\t\tif key[Globals.keys['Attack']]:\r\n\t\t\t\t\tafter_life()\r\n\r\n\t\t\t\tif key[Globals.keys['Pause']]:\r\n\t\t\t\t\tPAUSEEVENT = USEREVENT+3\r\n\t\t\t\t\twhile True:\r\n\t\t\t\t\t\tmsg = Msg('Press {} to resume'.format(Globals.keys['Pause']), colours['grey'], 75, (screen_width/2, 
screen_height/2))\r\n\t\t\t\t\t\tfor event in pygame.event.get():\r\n\r\n\t\t\t\t\t\t\tif event.type == QUIT or event.type == KEYDOWN and event.key == Globals.keys['Esc']:\r\n\t\t\t\t\t\t\t\tFunc.quit()\r\n\r\n\t\t\t\t\t\t\telif event.type == KEYDOWN and event.key == Globals.keys['Pause']:\r\n\t\t\t\t\t\t\t\tmsg.update(None, None, '')\r\n\t\t\t\t\t\t\t\tmessages.draw(screen)\r\n\t\t\t\t\t\t\t\tFunc.screenUpdate(screen)\r\n\t\t\t\t\t\t\t\tpygame.time.set_timer(PAUSEEVENT, 1000)\r\n\t\t\t\t\t\t\t\tcount = 3\r\n\r\n\t\t\t\t\t\t\telif event.type == PAUSEEVENT:\r\n\t\t\t\t\t\t\t\tmsg.update(None, None, str(count))\r\n\t\t\t\t\t\t\t\tmessages.draw(screen)\r\n\t\t\t\t\t\t\t\tFunc.screenUpdate(screen)\r\n\t\t\t\t\t\t\t\tcount -= 1\r\n\r\n\t\t\t\t\t\tif count == 0:\r\n\t\t\t\t\t\t\tmessages.remove(msg)\r\n\t\t\t\t\t\t\tbreak\r\n\r\n\t\t\t\t\t\tFunc.screenUpdate(screen)\r\n\r\n\t\tif user.dead:\r\n\t\t\tafter_life()\r\n\r\n\t\tplayer.update(screen, event)\r\n\t\tprint(user.hp, user.touch)\r\n\t\tmobs.update(user, event)\r\n\t\tpowerups.update()\r\n\t\tblocks.update()\r\n\r\n\t\tFunc.screenUpdate(screen)\r\n\t\tpygame.display.update()\r\n\t\tevent = None\r\n\t\tclock.tick(fps)\r\n\r\ndef instr():\r\n\tmessages.empty()\r\n\tbuttons.empty()\r\n\ttypes.empty()\r\n\tsliders.empty()\r\n\r\n\tmenu_index = 1\r\n\tButton.button_count = 0\r\n\r\n\tselected = False\r\n\tButton('Back', colours['blue'], 50, (screen_width-60, 0), 1, back)\r\n\tMsg('How to play:', colours['blue'], 75, (198, -52))\r\n\tMsg('Run away from the evil mobs!\\n\\\r\nTo jump press \"{}\"\\n\\\r\nTo attack press \"{}\"\\n\\\r\n\\nThe goal of the game is to get as many coins as possible within\\nthe time limit.\\n\\\r\nYou have 5 minutes until the game stops.\\n\\\r\nWatch out for evil monsters and other obstacles.\\n\\\r\n\\nGood luck, you\\'ll need it...\\n'.format(pygame.key.name(Globals.keys['Up']), pygame.key.name(Globals.keys['Attack'])), colours['black'], 35, (220, 80))\r\n\twhile True:\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == QUIT or event.type == KEYDOWN and event.key == Globals.keys['Esc']:\r\n\t\t\t\tFunc.quit()\r\n\t\t\telif event.type == KEYDOWN:\r\n\t\t\t\tif event.key == Globals.keys['Select']:\r\n\t\t\t\t\tselected = True\r\n\r\n\t\tmessages.update()\r\n\t\tbuttons.update(menu_index, selected)\r\n\r\n\t\tFunc.screenUpdate(screen, colours['red'])\r\n\t\tselected = False\r\n\r\ndef back():\r\n\tfor g in Groups:\r\n\t\tg.empty()\r\n\tmain()\r\n\r\ndef options():\r\n\tpygame.mouse.set_visible(True)\r\n\tmessages.empty()\r\n\tbuttons.empty()\r\n\ttypes.empty()\r\n\tsliders.empty()\r\n\r\n\r\n\tmenu_index = 1\r\n\tButton.button_count = 0\r\n\r\n\tselected = False\r\n\tMsg('Options', colours['black'], 72, (screen_width/2, -25))\r\n\tButton('Change keys', colours['blue'], 50, (screen_width/2, 132), 1, change_keys)\r\n\tButton('Sound', colours['blue'], 50, (screen_width/2, 214), 2, sound)\r\n\tButton('Back to main', colours['blue'], 50, (screen_width/2, 408), 3, back)\r\n\twhile True:\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == QUIT or event.type == KEYDOWN and event.key == Globals.keys['Esc']:\r\n\t\t\t\tFunc.quit()\r\n\t\t\telif event.type == KEYDOWN:\r\n\t\t\t\tif event.key == Globals.keys['Down']:\r\n\t\t\t\t\tmenu_index += 1\r\n\t\t\t\t\tif menu_index > Button.button_count:\r\n\t\t\t\t\t\tmenu_index = 1\r\n\t\t\t\telif event.key == Globals.keys['Up']:\r\n\t\t\t\t\tmenu_index -= 1\r\n\t\t\t\t\tif menu_index <= 0:\r\n\t\t\t\t\t\tmenu_index = Button.button_count\r\n\t\t\t\telif event.key 
== Globals.keys['Select']:\r\n\t\t\t\t\tselected = True\r\n\r\n\t\tmessages.update()\r\n\t\tbuttons.update(menu_index, selected)\r\n\r\n\t\tFunc.screenUpdate(screen, colours['white'])\r\n\t\tselected = False\r\n\r\n\r\ndef after_life():\r\n\tfor group in Groups:\r\n\t\tif group == player:\r\n\t\t\tcontinue\r\n\t\tgroup.empty()\r\n\r\n\tmenu_index = 1\r\n\r\n\tDEATHTIMEREVENT = USEREVENT+1\r\n\r\n\tfor user in player:\r\n\r\n\t\tdy = screen_height/2 - 14 - user.rect.centery\r\n\t\tdx = screen_width/2 - user.rect.centerx\r\n\r\n\t\tFunc.screenUpdate(screen, (255,255,255))\r\n\r\n\t\tcounter = 0\r\n\t\tpygame.mixer.music.stop()\r\n\t\tsnd = Func.playSound('playerDeath')\r\n\r\n\t\tt0 = pygame.time.get_ticks()\r\n\t\tt1 = pygame.time.get_ticks()\r\n\r\n\t\tvx = dx/19135\r\n\t\tvy = dy/19135\r\n\r\n\t\tif vx < 0.5 and vx > 0:\r\n\t\t\tvx = 1\r\n\t\telif vx > -0.5 and vx < 0:\r\n\t\t\tvx = -1\r\n\t\tif vy < 0.5 and vy > 0:\r\n\t\t\tvy = 1\r\n\t\telif vy > -0.5 and vy < 0:\r\n\t\t\tvy = -1\r\n\t\twhile pygame.time.get_ticks() - t0 < 19135:\r\n\t\t\ty = screen_height/2 - 14 - user.rect.centery\r\n\t\t\tx = screen_width/2 - user.rect.centerx\r\n\r\n\t\t\tx = x/(19135 - (pygame.time.get_ticks()-t0))\r\n\t\t\ty = y/(19135 - (pygame.time.get_ticks()-t0))\r\n\t\t\tif x > 1 or x < -1 or y > 1 or y < -1:\r\n\t\t\t\tdy = screen_height/2 - 14 - user.rect.centery\r\n\t\t\t\tdx = screen_width/2 - user.rect.centerx\r\n\r\n\t\t\t\tvx = dx/(19135 - (pygame.time.get_ticks()-t0))\r\n\t\t\t\tvy = dy/(19135 - (pygame.time.get_ticks()-t0))\r\n\r\n\t\t\t\tuser.rect.centerx += int(vx)\r\n\t\t\t\tuser.rect.centery += int(vy)\r\n\r\n\t\t\t\tFunc.screenUpdate(screen, (255,255,255))\r\n\r\n\t\t\telse:\r\n\t\t\t\tif user.rect.bottom >= screen_height and vy > 0 or user.rect.top <= 0 and vy < 0:\r\n\t\t\t\t\tvy = -vy\r\n\t\t\t\tif user.rect.right >= screen_width and vx > 0 or user.rect.left <= 0 and vx < 0:\r\n\t\t\t\t\tvx = -vx\r\n\r\n\t\t\t\tuser.rect.centerx += int(vx)\r\n\t\t\t\tuser.rect.centery += int(vy)\r\n\r\n\t\t\t\tFunc.screenUpdate(screen, (255,255,255))\r\n\t\t\tt1 = pygame.time.get_ticks()\r\n\t\t\tclock.tick(240)\r\n\r\n\t\tuser.rect.center = (screen_width/2, screen_height/2 + 14)\r\n\t\ti = Msg('I ', colours['red'], 75, (user.rect.centerx - 200, user.rect.top - 190))\r\n\r\n\t\tFunc.screenUpdate(screen, (191,191,191))\r\n\r\n\t\tpygame.time.wait(156)\r\n\r\n\t\tdont = Msg(\"don't \", colours['red'], 75, (i.rect.right, user.rect.top - 190), contrail, True)\r\n\t\tFunc.screenUpdate(screen, (127,127,127))\r\n\t\tpygame.time.wait(173)\r\n\r\n\t\twanna = Msg('wanna ', colours['red'], 75, (dont.rect.right, user.rect.top - 190), contrail, True)\r\n\t\tFunc.screenUpdate(screen, (63,63,63))\r\n\t\tpygame.time.wait(225)\r\n\r\n\t\tMsg('go', colours['red'], 75, (wanna.rect.right, user.rect.top - 190), contrail, True)\r\n\t\tFunc.screenUpdate(screen, (0,0,0))\r\n\t\tpygame.time.wait(700)\r\n\r\n\t\tpygame.time.set_timer(DEATHTIMEREVENT, 80)\r\n\r\n\t\tuser.rect.centery = screen_height/2\r\n\t\tFunc.screenUpdate(screen, colours['black'])\r\n\r\n\tmessages.empty()\r\n\tbuttons.empty()\r\n\ttypes.empty()\r\n\tsliders.empty()\r\n\r\n\r\n\tselected = False\r\n\tButton.button_count = 0\r\n\tMsg('You died', colours['red'], 75, (screen_width/2, 50), os.path.join(font_dir, 'Crucifixion-Regular.ttf'))\r\n\tMsg('Score: {}'.format(user.score), colours['tardis'], 60, (screen_width/2, screen_height/2))\r\n\tButton('Back to main', colours['blue'], 50, (screen_width/2, screen_height-180), 1, back)\r\n\tButton('Quit game', 
colours['blue'], 50, (screen_width/2, screen_height-100), 2, Func.quit)\r\n\twhile True:\r\n\t\tfor u in player:\r\n\t\t\tu.explode()\r\n\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == QUIT or event.type == KEYDOWN and event.key == Globals.keys['Esc']:\r\n\t\t\t\tFunc.quit()\r\n\r\n\t\t\telif event.type == KEYDOWN:\r\n\t\t\t\tif event.key == Globals.keys['Down']:\r\n\t\t\t\t\tmenu_index += 1\r\n\t\t\t\t\tif menu_index > Button.button_count:\r\n\t\t\t\t\t\tmenu_index = 1\r\n\t\t\t\telif event.key == Globals.keys['Up']:\r\n\t\t\t\t\tmenu_index -= 1\r\n\t\t\t\t\tif menu_index <= 0:\r\n\t\t\t\t\t\tmenu_index = Button.button_count\r\n\t\t\t\telif event.key == Globals.keys['Select']:\r\n\t\t\t\t\tselected = True\r\n\r\n\t\tmessages.update()\r\n\t\tblocks.update()\r\n\t\tbuttons.update(menu_index, selected)\r\n\r\n\t\tFunc.screenUpdate(screen, colours['black'])\r\n\t\tselected = False\r\n\r\ndef change_keys():\r\n\tmessages.empty()\r\n\tbuttons.empty()\r\n\ttypes.empty()\r\n\tsliders.empty()\r\n\r\n\r\n\tmenu_index = 1\r\n\tButton.button_count = 0\r\n\r\n\tselected = False\r\n\tButton('Reset to standard', colours['black'], 40 ,(screen_width - 160, 80), 1, reset_keys)\r\n\tButton('Back to main', colours['black'], 40 ,(screen_width - 160, 160), 2, back)\r\n\r\n\tMsg('Click on a white\\nbox to set a key', colours['black'], 40 , (screen_width-160, screen_height-160))\r\n\r\n\tType('Up', 50, (100, 1), Globals.keys['Up'])\r\n\tType('Down', 50, (100, 71), Globals.keys['Down'])\r\n\tType('Left', 50, (100, 141), Globals.keys['Left'])\r\n\tType('Right', 50, (100, 211), Globals.keys['Right'])\r\n\tType('Attack', 50, (100, 281), Globals.keys['Attack'])\r\n\tType('Select', 50, (100, 351), Globals.keys['Select'])\r\n\tType('Pause', 50, (100, 421), Globals.keys['Pause'])\r\n\tType('Esc', 50, (100, 491), Globals.keys['Esc'])\r\n\r\n\twhile True:\r\n\t\tmessages.update()\r\n\t\ttypes.update()\r\n\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == QUIT or event.type == KEYDOWN and event.key == Globals.keys['Esc']:\r\n\t\t\t\tFunc.quit()\r\n\t\t\telif event.type == KEYDOWN:\r\n\t\t\t\tif event.key == Globals.keys['Down']:\r\n\t\t\t\t\tmenu_index += 1\r\n\t\t\t\t\tif menu_index > Button.button_count:\r\n\t\t\t\t\t\tmenu_index = 1\r\n\t\t\t\telif event.key == Globals.keys['Up']:\r\n\t\t\t\t\tmenu_index -= 1\r\n\t\t\t\t\tif menu_index <= 0:\r\n\t\t\t\t\t\tmenu_index = Button.button_count\r\n\t\t\t\telif event.key == Globals.keys['Select']:\r\n\t\t\t\t\tselected = True\r\n\r\n\t\tbuttons.update(menu_index, selected)\r\n\r\n\t\tFunc.screenUpdate(screen, colours['tardis'])\r\n\r\ndef sound():\r\n\tpygame.mouse.set_visible(True)\r\n\tmessages.empty()\r\n\tbuttons.empty()\r\n\ttypes.empty()\r\n\tsliders.empty()\r\n\r\n\r\n\tmenu_index = 1\r\n\tButton.button_count = 0\r\n\r\n\tselected = False\r\n\tButton('Back to main', colours['black'], 40 ,(screen_width - 160, screen_height - 100), 1, back)\r\n\tMsg('Move the sliders using\\n\\\r\nyour mouse to adjust volume', colours['black'], 30, (150, 50))\r\n\r\n\tmusicVol = Slider((screen_width-400,200), (350, 50), colours['red'], 'Music volume', 50, pygame.mixer.music.get_volume())\r\n\tsoundVol = Slider((screen_width-400,400), (350, 50), colours['red'], 'Sound volume', 50, Globals.volume)\r\n\r\n\twhile True:\r\n\t\tmessages.update()\r\n\t\ttypes.update()\r\n\r\n\t\tpygame.mixer.music.set_volume(musicVol.value)\r\n\t\tGlobals.volume = soundVol.value\r\n\r\n\r\n\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif 
pygame.mouse.get_pressed()[0]:\r\n\t\t\t\tprint(pygame.mouse.get_pos())\r\n\t\t\tif event.type == QUIT or event.type == KEYDOWN and event.key == Globals.keys['Esc']:\r\n\t\t\t\tFunc.quit()\r\n\t\t\telif event.type == KEYDOWN:\r\n\t\t\t\tif event.key == Globals.keys['Select']:\r\n\t\t\t\t\tselected = True\r\n\r\n\t\tbuttons.update(menu_index, selected)\r\n\t\tsliders.update()\r\n\r\n\t\tFunc.screenUpdate(screen, colours['tardis'])\r\n\r\ndef reset_keys():\r\n\tGlobals.keys = keys_standard\r\n\tback()\r\n\r\nmain()\r\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 12692, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.mouse.set_visible", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.key.name", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 41, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.mixer.music.get_busy", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.time.set_timer", "line_number": 88, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.key.set_repeat", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 112, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pygame.time.set_timer", "line_number": 121, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 146, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 146, "usage_type": 
"attribute"}, {"api_name": "pygame.key.name", "line_number": 168, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 170, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pygame.mouse.set_visible", "line_number": 189, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 189, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 205, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.stop", "line_number": 245, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 245, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 248, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 248, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 249, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 249, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 262, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 262, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 266, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 266, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 267, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 267, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 272, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 272, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 273, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 273, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 290, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 290, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 298, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 302, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 302, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 306, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 306, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 310, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 310, "usage_type": "attribute"}, {"api_name": "pygame.time.set_timer", "line_number": 312, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 312, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path", "line_number": 325, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 333, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 333, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 385, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 385, "usage_type": "attribute"}, {"api_name": "pygame.mouse.set_visible", "line_number": 405, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 405, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.get_volume", "line_number": 420, "usage_type": "call"}, {"api_name": "pygame.mixer", 
"line_number": 420, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 427, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 427, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 432, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 432, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 433, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 433, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 434, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 434, "usage_type": "attribute"}]} +{"seq_id": "176235235", "text": "from django.shortcuts import render, get_object_or_404, get_list_or_404\nfrom umauma_happy_app.models import *\nfrom umauma_happy_app.utils import analysis\nfrom collections import OrderedDict\nimport datetime\nimport time\nimport io, sys\n\n\nclass SampleValues:\n analysis_number_samples = [100, 200, 500, 1000, 2000, 5000]\n\n\nclass ScheduledSample:\n start_delta = 0\n end_delta = 1000\n\n\ndef index(request):\n \"\"\"\n 他者分析の分析内容の選択画面\n :param request: Request\n :return render: with Request request, Dictionary context\n \"\"\"\n context = {'analysis_number_samples': SampleValues.analysis_number_samples,\n 'weight_amount': len(analysis.get_weight())}\n return render(request, 'social_analysis/index.html', context)\n\n\ndef calculate(request, analysis_number=None):\n \"\"\"\n 他者分析の全ユーザー要素別的中率の表示\n :param request: Request\n :param analysis_number: int\n :return render: with Request request, Dictionary context\n \"\"\"\n pre_time = time.time() # 経過時間表示用\n weights = analysis.get_weight(analysis_number)\n factor_count = analysis.count_factor(weights)\n context = {'analysis_number_samples': SampleValues.analysis_number_samples,\n 'factor_count': factor_count,\n 'analysis_number': analysis_number,\n 'calculation_duration': time.time() - pre_time}\n return render(request, 'social_analysis/calculate.html', context)\n\n\ndef calculate_by_period(request, start, end):\n \"\"\"\n 他者分析の全ユーザー該当期間の要素別的中率の表示\n :param request: Request\n :param start: String(YYYY-MM-DD HH:MM:ss)\n :param end: String(YYYY-MM-DD HH:MM:ss)\n :return render: with Request request, Dictionary context\n \"\"\"\n pre_time = datetime.datetime.now() # 経過時間表示用\n race_list = analysis.get_race_by_period(start, end)\n count_factor_by_races(race_list)\n context = {'analysis_number_samples': SampleValues.analysis_number_samples,\n 'analysis_start': start,\n 'analysis_end': end,\n 'analysis_race_number': len(race_list),\n 'calculation_duration': datetime.datetime.now() - pre_time}\n return render(request, 'social_analysis/calculate.html', context)\n\n\ndef calculate_remaining(request):\n \"\"\"\n 未処理のレースに対して,的中率を計算する\n :param request: Request\n :return factor_counter: Dictionary\n \"\"\"\n pre_time = datetime.datetime.now() # 経過時間表示用\n reservation_race_list = []\n race_list = Race.objects.all()\n for race in race_list:\n if is_calculated_factor_aggregate(race) is False:\n reservation_race_list.append(race)\n count_factor_by_races(reservation_race_list)\n context = {'analysis_number_samples': SampleValues.analysis_number_samples,\n 'analysis_race_number': len(reservation_race_list),\n 'calculation_duration': datetime.datetime.now() - pre_time}\n return render(request, 'social_analysis/calculate.html', context)\n\n\ndef show_all_aggregate(request):\n \"\"\"\n 計算済みの要素別的中率を要素別に表示(全期間)\n :param request:\n :return:\n \"\"\"\n pre_time = 
time.time() # 経過時間表示用\n summarize_past = summarize_past_race_aggregate()\n summarize_future = summarize_future_race_aggregate()\n\n context = {'analysis_number_samples': SampleValues.analysis_number_samples,\n 'factor_count_past': summarize_past['factor_counter'],\n 'factor_count_future': summarize_future['factor_counter'],\n 'analysis_number_past': summarize_past['analysis_number'],\n 'analysis_number_future': summarize_future['analysis_number'],\n 'analysis_race_number_past': summarize_past['analysis_race_number'],\n 'analysis_race_number_future': summarize_future['analysis_race_number'],\n 'calculation_duration': time.time() - pre_time}\n return render(request, 'social_analysis/calculate.html', context)\n\n\ndef is_calculated_factor_aggregate(race, factor=None):\n \"\"\"\n 与えられたレースの全ユーザの使用率が計算済みか判定\n :param race: Object\n :return: boolean\n \"\"\"\n if factor is None:\n analysis_data_list = list(EntireFactorAggregate.objects.filter(race_id=race.id))\n if len(analysis_data_list) == 0:\n return False\n else:\n return True\n else:\n analysis_data_list = list(EntireFactorAggregate.objects.filter(race_id=race.id, factor_id=factor.id))\n if len(analysis_data_list) == 0:\n return False\n else:\n return True\n\n\ndef save(factor_count, race):\n \"\"\"\n 全ユーザーの要素別使用回数,的中回数,的中率をレース毎にDBに保存\n :param factor_count: Dictionary\n :param race: String or Datetime\n :return:\n \"\"\"\n for key, value in factor_count.items():\n if is_calculated_factor_aggregate(race, key):\n analysis_data_list = list(EntireFactorAggregate.objects.filter(factor_id=key.id, race_id=race.id))\n analysis_data = analysis_data_list[0]\n else:\n analysis_data = EntireFactorAggregate()\n if value.keys() >= {'hit', 'hit_percentage'}: # hit,hit_percentageが格納されている時\n analysis_data.hit = value['hit']\n analysis_data.hit_percentage = value['hit_percentage']\n analysis_data.use = value['use']\n analysis_data.use_percentage = value['use_percentage']\n analysis_data.factor_id = key.id\n analysis_data.race_id = race.id\n analysis_data.save()\n print(f'{datetime.datetime.now()} | Complete saving {len(factor_count)} data in EntireFactorAggregate.')\n return\n\n\ndef count_factor_by_races(race_list):\n \"\"\"\n 与えられたRaceリストを指定しているweightの的中率等を計算し, レース毎にDBに保存\n :param race_list: List\n :return factor_counter: Dictionary\n \"\"\"\n for race in race_list:\n weights = analysis.get_weight_by_race(race)\n if analysis.is_not_null_rank_in_data(race):\n factor_counter = analysis.count_factor(weights)\n else:\n factor_counter = analysis.count_factor_only_use(weights)\n save(factor_counter, race)\n return\n\n\ndef summarize_past_race_aggregate():\n \"\"\"\n EntireFactorAggregateの過去レースに関して,的中率、使用率、使用回数、的中回数を集計する\n :return compact: Dictionary\n \"\"\"\n factor_list_all = list(Factor.objects.all())\n factor_counter_past = analysis.init_factor_counter()\n analysis_number_past = 0\n # 過去レースに関する計算結果を取得\n analysis_data_list_past = list(EntireFactorAggregate.objects.filter(hit__isnull=False))\n for analysis_data in analysis_data_list_past:\n factor_counter_past[analysis_data.factor]['use'] += analysis_data.use\n factor_counter_past[analysis_data.factor]['hit'] += analysis_data.hit\n analysis_number_past += analysis_data.use\n # 的中率と使用率を計算\n factor_counter_past = analysis.calculate_use_percentage(factor_counter_past, factor_list_all)\n factor_counter_past = analysis.calculate_hit_percentage(factor_counter_past, factor_list_all)\n compact = {'factor_counter': factor_counter_past,\n 'analysis_number': analysis_number_past,\n 'analysis_race_number': 
int(len(analysis_data_list_past) / len(factor_list_all))}\n return compact\n\n\ndef summarize_future_race_aggregate():\n \"\"\"\n EntireFactorAggregateの未来レースに関して,使用率、使用回数、を集計する\n :return compact: Dictionary\n \"\"\"\n factor_list_all = list(Factor.objects.all())\n factor_counter_future = analysis.init_factor_counter_only_use()\n analysis_number_future = 0\n # 未来レースに関する計算結果を取得\n analysis_data_list_future = list(EntireFactorAggregate.objects.filter(hit__isnull=True))\n for analysis_data in analysis_data_list_future:\n factor_counter_future[analysis_data.factor]['use'] += analysis_data.use\n analysis_number_future += analysis_data.use\n # 使用率を計算\n factor_counter_future = analysis.calculate_use_percentage(factor_counter_future, factor_list_all)\n compact = {'factor_counter': factor_counter_future,\n 'analysis_number': analysis_number_future,\n 'analysis_race_number': int(len(analysis_data_list_future) / len(factor_list_all))}\n return compact\n\n\ndef scheduled_calculate():\n \"\"\"\n バッチ処理の内容[未来のレースについて毎日使用率を計算する]\n 'python manage.py crontab add'を実行すると setting.py で設定した間隔で実行される\n 'python manage.py crontab remove'を実行するとバッチが全解除される\n 'python manage.py crontab show'を実行すると登録されているバッチが確認できる\n :return:\n \"\"\"\n sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') # ログ出力の文字コードをセット\n pre_time = time.time()\n today = datetime.date.today()\n start_time = today + datetime.timedelta(days=ScheduledSample.start_delta)\n end_time = today + datetime.timedelta(days=ScheduledSample.end_delta)\n race_list = analysis.get_race_by_period(start_time, end_time)\n print(f'{datetime.datetime.now()} | Start calculate {len(race_list)}Races in {start_time} ~ {end_time}')\n count_factor_by_races(race_list)\n print(f'{datetime.datetime.now()} | Complete calculate {len(race_list)}Races in {start_time} ~ {end_time}'\n f'Run Time : {time.time() - pre_time:.5}s')\n", "sub_path": "social_analysis/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9770, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "umauma_happy_app.utils.analysis.get_weight", "line_number": 26, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis.get_weight", "line_number": 38, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 38, "usage_type": "name"}, {"api_name": "umauma_happy_app.utils.analysis.count_factor", "line_number": 39, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 39, "usage_type": "name"}, {"api_name": "time.time", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 55, "usage_type": "attribute"}, {"api_name": "umauma_happy_app.utils.analysis.get_race_by_period", "line_number": 56, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 56, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 63, 
"usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 72, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 91, "usage_type": "call"}, {"api_name": "time.time", "line_number": 102, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 147, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 147, "usage_type": "attribute"}, {"api_name": "umauma_happy_app.utils.analysis.get_weight_by_race", "line_number": 158, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 158, "usage_type": "name"}, {"api_name": "umauma_happy_app.utils.analysis.is_not_null_rank_in_data", "line_number": 159, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 159, "usage_type": "name"}, {"api_name": "umauma_happy_app.utils.analysis.count_factor", "line_number": 160, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 160, "usage_type": "name"}, {"api_name": "umauma_happy_app.utils.analysis.count_factor_only_use", "line_number": 162, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 162, "usage_type": "name"}, {"api_name": "umauma_happy_app.utils.analysis.init_factor_counter", "line_number": 173, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 173, "usage_type": "name"}, {"api_name": "umauma_happy_app.utils.analysis.calculate_use_percentage", "line_number": 182, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 182, "usage_type": "name"}, {"api_name": "umauma_happy_app.utils.analysis.calculate_hit_percentage", "line_number": 183, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 183, "usage_type": "name"}, {"api_name": "umauma_happy_app.utils.analysis.init_factor_counter_only_use", "line_number": 196, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 196, "usage_type": "name"}, {"api_name": "umauma_happy_app.utils.analysis.calculate_use_percentage", "line_number": 204, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 204, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 219, "usage_type": "attribute"}, {"api_name": "io.TextIOWrapper", "line_number": 219, "usage_type": "call"}, {"api_name": "time.time", "line_number": 220, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 221, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 222, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 223, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis.get_race_by_period", "line_number": 224, "usage_type": "call"}, {"api_name": "umauma_happy_app.utils.analysis", "line_number": 224, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 225, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 225, "usage_type": "attribute"}, 
{"api_name": "datetime.datetime.now", "line_number": 227, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 227, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 228, "usage_type": "call"}]} +{"seq_id": "248808782", "text": "from typing import List\n\nfrom presidio_analyzer import AnalyzerEngine, RecognizerResult\n\nfrom presidio_image_redactor import OCR, TesseractOCR\nfrom presidio_image_redactor.entities import ImageRecognizerResult\n\n\nclass ImageAnalyzerEngine:\n \"\"\"ImageAnalyzerEngine class.\n\n :param analyzer_engine: The Presidio AnalyzerEngine instance\n to be used to detect PII in text\n :param ocr: the OCR object to be used to detect text in images.\n \"\"\"\n\n def __init__(self, analyzer_engine: AnalyzerEngine = None, ocr: OCR = None):\n if not analyzer_engine:\n analyzer_engine = AnalyzerEngine()\n self.analyzer_engine = analyzer_engine\n\n if not ocr:\n ocr = TesseractOCR()\n self.ocr = ocr\n\n def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:\n \"\"\"Analyse method to analyse the given image.\n\n :param image: PIL Image/numpy array or file path(str) to be processed\n :param kwargs: Additional values for the analyze method in AnalyzerEngine\n\n :return: list of the extract entities with image bounding boxes\n \"\"\"\n ocr_result = self.ocr.perform_ocr(image)\n text = self.ocr.get_text_from_ocr_dict(ocr_result)\n\n analyzer_result = self.analyzer_engine.analyze(\n text=text, language=\"en\", **kwargs\n )\n bboxes = self.map_analyzer_results_to_bounding_boxes(\n analyzer_result, ocr_result, text\n )\n return bboxes\n\n @staticmethod\n def map_analyzer_results_to_bounding_boxes(\n text_analyzer_results: List[RecognizerResult], ocr_result: dict, text: str\n ) -> List[ImageRecognizerResult]:\n \"\"\"Map extracted PII entities to image bounding boxes.\n\n Matching is based on the position of the recognized entity from analyzer\n and word (in ocr dict) in the text.\n\n :param text_analyzer_results: PII entities recognized by presidio analyzer\n :param ocr_result: dict results with words and bboxes from OCR\n :param text: text the results are based on\n\n return: list of extracted entities with image bounding boxes\n \"\"\"\n if (not ocr_result) or (not text_analyzer_results):\n return []\n\n bboxes = []\n proc_indexes = 0\n indexes = len(text_analyzer_results)\n\n pos = 0\n iter_ocr = enumerate(ocr_result[\"text\"])\n for index, word in iter_ocr:\n if not word:\n pos += 1\n else:\n for element in text_analyzer_results:\n text_element = text[element.start : element.end]\n # check position and text of ocr word matches recognized entity\n if (\n max(pos, element.start) < min(element.end, pos + len(word))\n ) and ((text_element in word) or (word in text_element)):\n bboxes.append(\n ImageRecognizerResult(\n element.entity_type,\n element.start,\n element.end,\n element.score,\n ocr_result[\"left\"][index],\n ocr_result[\"top\"][index],\n ocr_result[\"width\"][index],\n ocr_result[\"height\"][index],\n )\n )\n\n # add bounding boxes for all words in ocr dict\n # contained within the text of recognized entity\n # based on relative position in the full text\n while pos + len(word) < element.end:\n index, word = next(iter_ocr)\n if word:\n bboxes.append(\n ImageRecognizerResult(\n element.entity_type,\n element.start,\n element.end,\n element.score,\n ocr_result[\"left\"][index],\n ocr_result[\"top\"][index],\n ocr_result[\"width\"][index],\n ocr_result[\"height\"][index],\n )\n )\n pos += len(word) + 1\n proc_indexes 
+= 1\n\n if proc_indexes == indexes:\n break\n pos += len(word) + 1\n\n return bboxes\n", "sub_path": "presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py", "file_name": "image_analyzer_engine.py", "file_ext": "py", "file_size_in_byte": 4726, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "presidio_analyzer.AnalyzerEngine", "line_number": 17, "usage_type": "name"}, {"api_name": "presidio_image_redactor.OCR", "line_number": 17, "usage_type": "name"}, {"api_name": "presidio_analyzer.AnalyzerEngine", "line_number": 19, "usage_type": "call"}, {"api_name": "presidio_image_redactor.TesseractOCR", "line_number": 23, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 26, "usage_type": "name"}, {"api_name": "presidio_image_redactor.entities.ImageRecognizerResult", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 47, "usage_type": "name"}, {"api_name": "presidio_analyzer.RecognizerResult", "line_number": 47, "usage_type": "name"}, {"api_name": "presidio_image_redactor.entities.ImageRecognizerResult", "line_number": 80, "usage_type": "call"}, {"api_name": "presidio_image_redactor.entities.ImageRecognizerResult", "line_number": 99, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 48, "usage_type": "name"}, {"api_name": "presidio_image_redactor.entities.ImageRecognizerResult", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "496696582", "text": "__author__ = 'carstens'\n\n\nimport webapp2\nimport math\n\nclass IndexHandler(webapp2.RequestHandler):\n \"\"\"\n Controller for the Index page\n \"\"\"\n\n def get(self):\n self.calcSqrt()\n\n def post(self):\n self.calcSqrt()\n\n def calcSqrt(self):\n x = self.request.get('x')\n if (x == ''):\n x = 2\n self.response.out.write(math.sqrt(float(x)))\n\n\napp = webapp2.WSGIApplication([\n ('.*/sqrt.*', IndexHandler)\n], debug=True)", "sub_path": "attic/webmath/sqrt.py", "file_name": "sqrt.py", "file_ext": "py", "file_size_in_byte": 469, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "webapp2.RequestHandler", "line_number": 7, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 22, "usage_type": "call"}, {"api_name": "webapp2.WSGIApplication", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "19386522", "text": "# coding: utf-8\n\"\"\"\nWorker process\n\"\"\"\nfrom http_handler import handle\n\n\nimport sys\nimport logging\n\nfrom rainbow_logging_handler import RainbowLoggingHandler\n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\"[%(asctime)s] %(name)s %(funcName)s():%(lineno)d\\t%(message)s\")\nhandler = RainbowLoggingHandler(\n sys.stderr,\n color_pathname=('black', 'red', True), color_module=('yellow', None, False),\n color_funcName=('blue', 'white', True), color_lineno=('green', None, False)\n)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nCOMMANDS = {\n 'OFFER': 0,\n 'ACCEPT': 1,\n 'ACKNOWLEDGEMENT': 2,\n 'ERROR': 3\n}\n\n\nclass Worker:\n\n def __init__(self, pipe, listen_socket, root_dir):\n self._is_free = True\n self._pipe = pipe\n self._listen_socket = listen_socket\n self._root_dir = root_dir\n\n def wait_and_handle_client(self):\n while True:\n command = self._pipe.recv()\n if command == COMMANDS['OFFER']:\n connection, address = self._listen_socket.accept()\n handle(connection, self._root_dir)\n 
connection.close()\n self._pipe.send(COMMANDS['ACKNOWLEDGEMENT'])\n\n @property\n def is_free(self):\n return self._is_free\n\n def make_free(self):\n self._is_free = True\n\n def make_disturbed(self):\n self._is_free = False\n", "sub_path": "worker.py", "file_name": "worker.py", "file_ext": "py", "file_size_in_byte": 1431, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 16, "usage_type": "call"}, {"api_name": "rainbow_logging_handler.RainbowLoggingHandler", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 18, "usage_type": "attribute"}, {"api_name": "http_handler.handle", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "200294194", "text": "# class DatasetGenerator:\n# def __init__(self,url,apikey):\n# self.url = url\n# self.apikey =apikey\n# self.data = None\n# def get_data(self, ):\n#\n# class JsonParser:\n# def __init__(self):\n# pass\n# def parse(self):\n# pass\nimport math\nimport requests\nfrom tqdm import tqdm\nimport concurrent.futures\nfrom backend import config\nfrom backend.db import EventDataBase\nimport datetime\nwith open(\"artist.txt\", \"r\") as f:\n lines = f.readlines()\n lines = [int(line[:-1].split(' ', 1)[0]) for line in lines]\n #print(lines)\nurl_template = \"https://api.songkick.com/api/3.0/artists/substitution/gigography.json?apikey=io09K9l3ebJxmxe2\"\nurl_list = []\n# for art_id in tqdm(lines):\n#\n#\n# url = url_template.replace('substitution', str(art_id))\n# res = requests.get(url)\n# data = res.json()\n#\n# tot = data['resultsPage']['totalEntries']\n# perPage = data['resultsPage'][\"perPage\"]\n# if tot == 0:\n# continue\n# total_page = math.ceil(tot / perPage)\n# url_list.append(url)\n#\n# for j in range(2, total_page + 1):\n# new_url = url + \"&page=%d\" % (j)\n# url_list.append(new_url)\ndef parse(data):\n # unused stub left from an earlier refactor; event parsing now happens inline in process()\n raise NotImplementedError\ndef process(art_id):\n ret = []\n url = url_template.replace('substitution', str(art_id))\n res = requests.get(url)\n data = res.json()\n\n tot = data['resultsPage']['totalEntries']\n perPage = data['resultsPage'][\"perPage\"]\n if tot == 0:\n return []\n total_page = math.ceil(tot / perPage)\n for event in data['resultsPage']['results']['event']:\n idx = event['id']\n name = event['displayName']\n date = event['start']['date'] if not event['start']['datetime'] else event['start']['datetime']\n city = event['location']['city']\n long = event['location']['lng']\n lat = event['location']['lat']\n values = dict()\n values['id'] = idx\n values['name'] = name\n values['long'] = long\n values['lat'] = lat\n values['city'] = city\n values['date'] = date\n ret.append(values)\n #DB.insert(values)\n\n for j in range(2, total_page + 1):\n\n new_url = url + \"&page=%d\" % (j)\n #print(new_url)\n res = requests.get(new_url)\n data = res.json()\n for event in data['resultsPage']['results']['event']:\n idx = event['id']\n name = event['displayName']\n\n date = event['start']['date'] if not event['start']['datetime'] else event['start']['datetime']\n city = event['location']['city']\n long = event['location']['lng']\n lat = event['location']['lat']\n\n values = dict()\n values['id'] = idx\n values['name'] = name\n values['long'] = long\n values['lat'] = lat\n values['city'] = city\n values['date'] = date\n ret.append(values)\n\n return ret\n\n\n\n# 
print(\"writing file\")\n# outF = open(\"events.txt\", \"w\")\n# for v in res:\n# line = \"%s\\n\" %v\n# outF.write(line)\n# outF.close()\nif __name__ == \"__main__\":\n\n res = []\n errors = []\n DB = EventDataBase(config.db_connect)\n with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:\n # Start the load operations and mark each future with its URL\n future_to_url = {executor.submit(process, art_id):art_id for art_id in lines[:]}\n\n for future in tqdm(concurrent.futures.as_completed(future_to_url)):\n try:\n for value in future.result():\n\n date = value['date']\n time = date.split('T')[0].split('-')\n DB.insert(value)\n except Exception:\n errors.append(future_to_url[future])\n #a = [process(art_id) for art_id in lines[:5]]\n", "sub_path": "backend/data_gen.py", "file_name": "data_gen.py", "file_ext": "py", "file_size_in_byte": 3813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 56, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 78, "usage_type": "call"}, {"api_name": "backend.db.EventDataBase", "line_number": 112, "usage_type": "call"}, {"api_name": "backend.config.db_connect", "line_number": 112, "usage_type": "attribute"}, {"api_name": "backend.config", "line_number": 112, "usage_type": "name"}, {"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 113, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 113, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 113, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 117, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.as_completed", "line_number": 117, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 117, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 117, "usage_type": "name"}]} +{"seq_id": "6910111", "text": "\"\"\" This module contains miscellaneous helper functions \"\"\"\nfrom configparser import ConfigParser\nfrom time import strftime\nimport os\nimport json\nimport logging\nimport requests\n\ndef get_config_filepath(filename='ldp_config.ini'):\n \"\"\" Read the config file's path from env if it exists \"\"\"\n from_env = os.getenv('LDP_CONFIG_FILE')\n if from_env is not None:\n return from_env\n else:\n # one directory up from this file\n return os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', filename)\n\ndef config(section, filename='ldp_config.ini'):\n \"\"\" Read the config and return it as a dictionary \"\"\"\n parser = ConfigParser()\n parser.read(get_config_filepath())\n\n config_map = {}\n if parser.has_section(section):\n params = parser.items(section)\n for param in params:\n config_map[param[0]] = param[1]\n else:\n raise Exception('Section {0} not found in the {1} file'.format(section, filename))\n\n return config_map\n\ndef camelcase(kebab_str):\n \"\"\" If input is 'fixed-due-date-schedule', output is 'fixedDueDateSchedule' \"\"\"\n tokens = kebab_str.split('-')\n return tokens[0] + ''.join(x.title() for x in tokens[1:])\n\ndef okapi_login():\n \"\"\" Get an Okapi token using credentials in the config file \"\"\"\n params = config(section='extraction')\n url = params['okapi_url'] + '/authn/login'\n payload = {\n 'username': params['admin_user'],\n 'password': params['admin_pass']\n }\n headers = {\n 'X-Okapi-Tenant': 
params['okapi_tenant'],\n 'Content-Type': 'application/json'\n }\n try:\n resp = requests.post(url, data=json.dumps(payload), headers=headers)\n except requests.exceptions.ConnectionError:\n logging.debug('Request URL: %s', url)\n logging.error('''\\nError: Request to FOLIO failed.\nCheck that URL is correct and the server is up.\\n''')\n raise SystemExit()\n if resp.status_code != 201:\n logging.debug('[%d] Error logging in. Resp: %s', resp.status_code, resp.text)\n else:\n logging.debug('Logged in as %s', params['admin_user'])\n return resp.headers['x-okapi-token']\n return None\n\ndef project_dir():\n \"\"\" Returns the project directory \"\"\"\n src_dir = os.path.dirname(os.path.realpath(__file__))\n return os.path.dirname(src_dir)\n\ndef get_path(path):\n \"\"\" If path is relative, it is relative to the project directory \"\"\"\n if os.path.isabs(path):\n return path\n else:\n return os.path.join(project_dir(), path)\n\ndef extraction_config(okapi_token):\n \"\"\" Reads and returns the 'extraction' config \"\"\"\n params = config(section='extraction')\n okapi_url = params['okapi_url']\n\n headers = {\n 'X-Okapi-Tenant': params['okapi_tenant'],\n 'X-Okapi-Token': okapi_token\n }\n pretty_print = None\n if bool(params['pretty_print_output']):\n pretty_print = 2\n output_dir = get_path(params['output_dir'])\n schema_file = get_path(params['schema_create_file'])\n return (okapi_url, headers, pretty_print, output_dir, schema_file)\n\n\ndef make_dir(dir_path):\n \"\"\" Make the directory, ignore error if it already exists \"\"\"\n try:\n os.makedirs(dir_path)\n except FileExistsError:\n pass\n return dir_path\n\ndef make_timestamped_dir(output_dir, time):\n \"\"\" Make the directory where the extraction output will go \"\"\"\n dir_path = os.path.join(output_dir, strftime(\"%Y%m%d_%H%M%S\", time))\n return make_dir(dir_path)\n", "sub_path": "postgresDemo/src/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 3285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 51, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 52, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 53, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 54, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.isabs", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": 
"os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "545256392", "text": "# -*- coding: utf-8 -*-\n# @Time : 19-3-26\n# @Author : hay\nimport traceback\nfrom core.soapApi import WebpowerSoapBase\nfrom sendblaster.rq.baseapi import BaseApi\nfrom emails.models import EmailSummariz\nfrom jobs.models import Teachers\nfrom sendblaster import models as sd_model\n\n\nclass WebpowerSoap(BaseApi):\n\n def __init__(self, url, login, *args, **kwargs):\n self.instance = WebpowerSoapBase(url, login, *args, **kwargs)\n\n def apitest(self):\n return self.instance.test_connection()\n\n def run(self):\n pass\n\n def saveBrands(self, manage_instance, *args, **kwargs):\n brands = self.SaopResult(self.instance.getBrands())\n self.tag = 'saveBrands'\n self.printlog('[saveBrands]: {}'.format(len(brands)))\n for brand in brands:\n item = dict()\n item['fid'] = self.getattr(brand, 'id')\n item['name'] = self.getattr(brand, 'name')\n item['Platforms_id'] = manage_instance.platformqueryset.id\n ret = self.save_date(sd_model.Brand, item, filter={\n 'Platforms_id': manage_instance.platformqueryset.id,\n 'fid': item['fid']\n })\n # print(brands)\n\n def saveCampaign(self, manage_instance, *args, **kwargs):\n\n campaigns = self.SaopResult(self.instance.getCampaigns())\n self.tag = 'saveCampaign'\n self.printlog('[saveCampaign]: {}'.format(len(campaigns)))\n brands = sd_model.Brand.objects.filter(status__gt=0, Platforms_id=manage_instance.platformqueryset.id).all()\n if not brands:\n print(' not found brands exist!')\n return False\n\n for campaign in campaigns:\n item = dict()\n Brands_id = self.getattr(campaign, 'brand_id')\n if not Brands_id:\n continue\n\n brand = self.find_querySet(brands, find_filed='fid', find_value=Brands_id)\n if not brand:\n continue\n bid = brand.id\n\n item['Brands_id'] = bid\n item['fid'] = self.getattr(campaign, 'id')\n item['name'] = self.getattr(campaign, 'name')\n item['is_overall'] = self.getattr(campaign, 'is_overall')\n item['is_active'] = self.getattr(campaign, 'is_active')\n item['has_soap_api'] = self.getattr(campaign, 'has_soap_api')\n item['remark'] = self.getattr(campaign, 'remarks')\n item['create_date'] = self.to_date('{} 00:00:00'.format(self.getattr(campaign, 'create_date')))\n\n self.save_date(sd_model.Campaign, item, filter={\n 'fid': item['fid'], 'Brands_id': bid\n })\n\n def saveMailings(self, manage_instance, *args, **kwargs):\n self.tag = 'saveMailings'\n\n where = {'Brands__Platforms_id': manage_instance.platformqueryset.id, 'is_active': 1, 'has_soap_api': 1}\n if kwargs.get('other'):\n if kwargs['other'].get('campaignQ'):\n where.update({'id': kwargs['other'].get('campaignQ').id})\n\n campaigns = sd_model.Campaign.objects.filter(\n **where\n ).all()\n\n if not campaigns:\n print('[not found campaigns] exist!')\n return False\n self.printlog('[getcampaigns]: len:{}'.format(len(campaigns)))\n for campaign in campaigns:\n\n self.printlog('[for-> campaign]: name:{}, id:{}'.format(campaign.name, campaign.id))\n\n # TODO\n mailings = self.SaopResult(self.instance.getMailings(campaign.fid, limit=0, definitiveOnly=True))\n self.printlog('[getMailings]: test_len:{}'.format(len(mailings)))\n for mail in 
mailings:\n item = dict()\n item['Campaigns_id'] = campaign.id\n item['fid'] = self.getattr(mail, 'id')\n item['name'] = self.getattr(mail, 'name')\n item['subject'] = self.getattr(mail, 'subject')\n item['preheader'] = self.getattr(mail, 'preheader')\n item['kind'] = self.getattr(mail, 'kind')\n item['priority'] = self.getattr(mail, 'priority')\n item['from_name'] = self.getattr(mail, 'from_name')\n item['sender_id'] = self.getattr(mail, 'sender_id')\n item['plaintext_msg'] = self.getattr(mail, 'plaintext_msg')\n item['attachment'] = self.getattr(mail, 'attachment')\n item['optin_confirm_candidate'] = self.getattr(mail, 'optin_confirm_candidate')\n item['history'] = self.getattr(mail, 'history')\n item['last_mod_date'] = self.getattr(mail, 'last_mod_date')\n item['last_def_sent_date'] = self.getattr(mail, 'last_def_sent_date')\n item['last_mod_user'] = self.getattr(mail, 'last_mod_user')\n item['weblink'] = self.getattr(mail, 'weblink')\n\n mailingstatssummary = self.instance.getMailingStatsSummary(campaign.fid, item.get('fid'))\n if mailingstatssummary.get('result'):\n item.update(mailingstatssummary.get('result'))\n try:\n del item['click2open_rate']\n except:\n pass\n\n mailingQ = self.save_date(sd_model.Mailing, item, filter={\n 'Campaigns_id': campaign.id, 'fid': item['fid']\n })\n if mailingQ.is_email_mailing == 1:\n self.printlog('已更新细则,跳过!')\n continue\n # 启动模式为simple则不爬取细则\n if manage_instance.querydict.get('model') and manage_instance.querydict.get('model') == 'simple':\n self.printlog(mailingQ, '{}{}不细则'.format(self.tag, '[saveMailings]'))\n continue\n self.printlog(mailingQ, '{}{}细则'.format(self.tag, '[saveMailings]'))\n self.__save_recipients_all(campaign, mailingQ)\n # break\n\n # mailings = self.SaopResult(self.instance.getMailings())\n\n def saveMailing(self, manage_instance, *args, **kwargs):\n if kwargs.get('other'):\n if kwargs['other'].get('mailingQ'):\n mailingQ = kwargs['other'].get('mailingQ')\n\n if mailingQ.is_email_mailing == 0:\n campaign_fid = mailingQ.Campaigns.fid\n campaign_id = mailingQ.Campaigns.id\n mailing_fid = mailingQ.fid\n\n mailingstatssummary = self.instance.getMailingStatsSummary(campaign_fid, mailing_fid)\n\n if mailingstatssummary.get('result'):\n item = dict()\n item.update(mailingstatssummary.get('result'))\n try:\n del item['click2open_rate']\n except:\n pass\n\n mailingQ = self.save_date(sd_model.Mailing, item, filter={\n 'Campaigns_id': campaign_id, 'fid': mailing_fid\n })\n if manage_instance.querydict.get('model') != 'simple':\n self.printlog(mailingQ, '{}{}细则'.format(self.tag, '[saveMailings]'))\n try:\n self.__save_recipients_all(mailingQ.Campaigns, mailingQ)\n except:\n self.printlog(str(traceback.format_exc()))\n else:\n self.printlog('{}-{} 已更新细则,跳过!'.format(mailingQ.name, mailingQ.id))\n\n def __save_recipients_all(self, campaign, mailingQ):\n \"\"\"\n 处理投递任务中的所有邮箱\n :param campaign: 活动对象\n :param mailingQ: 群发任务QuerySet对象\n :param recipients: 投递任务群发的所有邮箱集合\n :param mailingbounce: 投递失败邮箱集合\n :param mailingresponse: 投递成功各种状态集合 open, click, trigger\n :return:\n \"\"\"\n recipients = self.SaopResult(\n self.instance.getRecipients(campaign.fid, mailingQ.fid)\n )\n self.printlog('[getRecipients] {}'.format(len(recipients)))\n recipientsQ = self.__save_recipients(campaign, mailingQ, recipients)\n\n bounce_date = mailingQ.last_mod_date.strftime(\"%Y-%m-%d\")\n mailingbounce = self.SaopResult(\n self.instance.getMailingBounce(campaign.fid, mailingQ.fid, bounce_date)\n )\n self.printlog('[getMailingBounce]', len(mailingbounce))\n\n 
mailingresponse = self.SaopResult(\n self.instance.getMailingResponse(campaign.fid, mailingQ.fid)\n )\n self.printlog('[getMailingResponse]', len(mailingresponse))\n # print('mailingresponse', len(mailingresponse), mailingresponse)\n\n self.__save_emailmailing(recipientsQ, mailingQ, mailingbounce, mailingresponse)\n sd_model.Mailing.objects.filter(id=mailingQ.id).update(**{'is_email_mailing': 1})\n\n def __save_recipients(self, campaign, mailingQ, recipients):\n self.printlog('[__save_recipients]')\n rec_email = []\n for rec in recipients:\n try:\n rec_email.append(rec.fields[0].value.lower())\n except:\n continue\n self.printlog('[getrecipients_email]: {}'.format(len(rec_email)))\n try:\n self.recipientsQ = list(sd_model.SendEmail.objects.filter(email__in=rec_email).all())\n except:\n self.recipientsQ = []\n\n existemail = {}\n [existemail.update({r.email.lower(): r.email.lower()}) for r in self.recipientsQ]\n\n # 客户id\n teacherList = EmailSummariz.objects.filter(email__in=list(existemail.keys()), status__gt=0).all()\n teacher_email = {}\n [teacher_email.update({r.email.lower(): r}) for r in teacherList]\n\n for rec in rec_email:\n email = rec.lower()\n if existemail and email in existemail:\n continue\n # teacherQ= EmailSummariz.objects.filter(email=email, status__gt=0).first()\n teacherQ = teacher_email.get(email)\n if teacherQ:\n teacher_id = teacherQ.id\n Teachers.objects.filter(id=teacher_id).update(is_labcrawl=1)\n else:\n teacher_id = 0\n\n try:\n rQ = sd_model.SendEmail(email=email, teacher_id=teacher_id)\n rQ.save()\n self.recipientsQ.append(rQ)\n except:\n continue\n\n self.printlog('[return all recipientsQ] {}'.format(len(self.recipientsQ)))\n return self.recipientsQ\n\n def __save_emailmailing(self, recipientsQ, mailingQ, mailingbounce, mailingresponse):\n\n exist_emailmailing_list = {}\n exist_EmailMailing = list(sd_model.EmailMailing.objects.filter(\n Mailings_id=mailingQ.id, SendEmails__in=recipientsQ\n ).all())\n [exist_emailmailing_list.update({r.SendEmails.email: r.SendEmails.email}) for r in exist_EmailMailing]\n\n for rec in recipientsQ:\n email = rec.email.lower()\n if email in exist_emailmailing_list:\n self.printlog('[__save_emailmailing]: {} existed!'.format(email))\n continue\n item = dict()\n # item['status'] = 1\n item['SendEmails_id'] = rec.id\n item['Mailings_id'] = mailingQ.id\n otheritem = self.find_email_types(email, mailingbounce, mailingresponse)\n if otheritem.get('log_date') and 'T' in otheritem.get('log_date'):\n otheritem['log_date'] = self.to_date(otheritem['log_date'].split('+')[0], \"%Y-%m-%dT%H:%M:%S\")\n elif otheritem.get('log_date') and '-' in otheritem.get('log_date') and ' ' in otheritem.get('log_date'):\n otheritem['log_date'] = self.to_date(otheritem['log_date'].split('+')[0], \"%Y-%m-%d %H:%M:%S\")\n else:\n del otheritem['log_date']\n\n item.update(otheritem)\n\n sd_model.EmailMailing(**item).save()\n self.printlog('[__save_emailmailing]: {} save!'.format(email))\n\n def find_email_types(self, email, mailingbounce, mailingresponse):\n rdict = {\"hard\": 0, 'soft': 0, 'send': 1, 'open': 0, 'click': 0, 'trigger': 0, 'log_date': '', 'remark': ''}\n email = email.lower()\n bad = False\n for bounce in mailingbounce:\n if email == bounce.field.lower():\n bad = True\n if bounce.type == 'soft':\n rdict.update({'soft': 1})\n elif bounce.type == 'hard':\n rdict.update({'hard': 1})\n rdict.update({'remark': self.getattr(bounce, 'message')})\n rdict.update({'log_date': str(self.getattr(bounce, 'log_date'))})\n if bad:\n rdict.update({'send': 0})\n 
return rdict\n else:\n for res in mailingresponse:\n if email == res.field.lower():\n if res.type == 'open':\n rdict.update({'open': 1})\n elif res.type == 'click':\n rdict.update({'click': 1})\n elif res.type == 'trigger':\n rdict.update({'trigger': 1})\n rdict.update({'remark': self.getattr(res, 'message')})\n rdict.update({'log_date': str(self.getattr(res, 'log_date'))})\n\n return rdict\n\n def SaopResult(self, result, filed='result'):\n \"\"\"处理saop 返回的结果\"\"\"\n result = result.get(filed, {})\n ret = []\n if result:\n for k, row in result.items():\n for cl in row:\n ret.append(cl)\n return ret", "sub_path": "sendblaster/rq/webpower.py", "file_name": "webpower.py", "file_ext": "py", "file_size_in_byte": 13748, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sendblaster.rq.baseapi.BaseApi", "line_number": 12, "usage_type": "name"}, {"api_name": "core.soapApi.WebpowerSoapBase", "line_number": 15, "usage_type": "call"}, {"api_name": "sendblaster.models.Brand", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sendblaster.models", "line_number": 32, "usage_type": "name"}, {"api_name": "sendblaster.models.Brand.objects.filter", "line_number": 43, "usage_type": "call"}, {"api_name": "sendblaster.models.Brand", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sendblaster.models", "line_number": 43, "usage_type": "name"}, {"api_name": "sendblaster.models.Campaign", "line_number": 68, "usage_type": "attribute"}, {"api_name": "sendblaster.models", "line_number": 68, "usage_type": "name"}, {"api_name": "sendblaster.models.Campaign.objects.filter", "line_number": 80, "usage_type": "call"}, {"api_name": "sendblaster.models.Campaign", "line_number": 80, "usage_type": "attribute"}, {"api_name": "sendblaster.models", "line_number": 80, "usage_type": "name"}, {"api_name": "sendblaster.models.Mailing", "line_number": 123, "usage_type": "attribute"}, {"api_name": "sendblaster.models", "line_number": 123, "usage_type": "name"}, {"api_name": "sendblaster.models.Mailing", "line_number": 159, "usage_type": "attribute"}, {"api_name": "sendblaster.models", "line_number": 159, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 167, "usage_type": "call"}, {"api_name": "sendblaster.models.Mailing.objects.filter", "line_number": 200, "usage_type": "call"}, {"api_name": "sendblaster.models.Mailing", "line_number": 200, "usage_type": "attribute"}, {"api_name": "sendblaster.models", "line_number": 200, "usage_type": "name"}, {"api_name": "sendblaster.models.SendEmail.objects.filter", "line_number": 212, "usage_type": "call"}, {"api_name": "sendblaster.models.SendEmail", "line_number": 212, "usage_type": "attribute"}, {"api_name": "sendblaster.models", "line_number": 212, "usage_type": "name"}, {"api_name": "emails.models.EmailSummariz.objects.filter", "line_number": 220, "usage_type": "call"}, {"api_name": "emails.models.EmailSummariz.objects", "line_number": 220, "usage_type": "attribute"}, {"api_name": "emails.models.EmailSummariz", "line_number": 220, "usage_type": "name"}, {"api_name": "jobs.models.Teachers.objects.filter", "line_number": 232, "usage_type": "call"}, {"api_name": "jobs.models.Teachers.objects", "line_number": 232, "usage_type": "attribute"}, {"api_name": "jobs.models.Teachers", "line_number": 232, "usage_type": "name"}, {"api_name": "sendblaster.models.SendEmail", "line_number": 237, "usage_type": "call"}, {"api_name": "sendblaster.models", "line_number": 237, "usage_type": 
"name"}, {"api_name": "sendblaster.models.EmailMailing.objects.filter", "line_number": 249, "usage_type": "call"}, {"api_name": "sendblaster.models.EmailMailing", "line_number": 249, "usage_type": "attribute"}, {"api_name": "sendblaster.models", "line_number": 249, "usage_type": "name"}, {"api_name": "sendblaster.models.EmailMailing", "line_number": 273, "usage_type": "call"}, {"api_name": "sendblaster.models", "line_number": 273, "usage_type": "name"}]} +{"seq_id": "586919308", "text": "# %%\n# %%\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\nimport fiona\nfrom shapely.geometry import Point\nimport contextily as ctx\n\n# %%\n\n# 1. Streamgauges: Gauges II USGS stream gauge dataset\n# https://water.usgs.gov/GIS/metadata/usgswrd/XML/gagesII_Sept2011.xml#stdorder\n\n# Reading it using geopandas\nfile = os.path.join('data/gagesII_9322_point_shapefile',\n 'gagesII_9322_sept30_2011.shp')\ngages = gpd.read_file(file)\n\n# Exploring\ngages.columns\ngages.shape\n\n# Looking at the geometry now\ngages.geom_type\n# check our CRS - coordinate reference system\ngages.crs\n# Check the spatial extent\ngages.total_bounds\ngages.describe\n\n# %%\n# Now lets make a map!\nfig, ax = plt.subplots(figsize=(5, 5))\ngages.plot(ax=ax)\nplt.show()\n\n# Zoom in and just look at AZ\ngages.columns\ngages.STATE.unique()\ngages_AZ = gages[gages['STATE'] == 'AZ']\ngages_AZ.shape\n\n# Basic plot of AZ gages\nfig, ax = plt.subplots(figsize=(5, 5))\ngages_AZ.plot(ax=ax)\nplt.show()\n\n# More advanced - color by attribute\nfig, ax = plt.subplots(figsize=(5, 5))\ngages_AZ.plot(column='DRAIN_SQKM', categorical=False,\n legend=True, marker='^', markersize=45, cmap='viridis',\n ax=ax)\nax.set_title(\"Arizona stream gauge drainge area\\n (sq km)\")\nplt.show()\n\n# %%\n# 2. Watershed: Boundaries for Lower Colorado basin\n# https://www.usgs.gov/core-science-systems/ngp/national-hydrography/access-national-hydrography-products\n# https://viewer.nationalmap.gov/basic/?basemap=b1&category=nhd&title=NHD%20View\n\n# Watershed boundaries for the lower colorado\nfile = os.path.join('data/WBD_15_HU2_GDB', 'WBD_15_HU2_GDB.gdb')\nfiona.listlayers(file)\nHUC6 = gpd.read_file(file, layer=\"WBDHU6\")\n\ntype(HUC6)\nHUC6.head()\n\n# %%\n# Adding Verde River Stream gauge\nverde_gage = gages_AZ[gages_AZ['STAID'] ==\n '09506000'][['LAT_GAGE', 'LNG_GAGE']]\npoint_list = np.array([[-111.789871, 34.448361]])\n\n# make these into spatial features\npoint_geom = [Point(xy) for xy in point_list]\n\n# mape a dataframe of these points\npoint_df = gpd.GeoDataFrame(point_geom, columns=['geometry'],\n crs=HUC6.crs)\n\n# plot these on the first dataset\nfig, ax = plt.subplots(figsize=(8, 8))\nHUC6.plot(ax=ax, edgecolor='k', facecolor='cyan')\npoint_df.plot(ax=ax, color='red', marker='s', markersize=30)\nax.set_title(\"HUC Boundaries\")\nplt.show()\n\n# %%\n# 3. Arizona state boundaries\n# https://www.sciencebase.gov/catalog/item/59fa9f59e4b0531197affb13\n\nfilepath2 = os.path.join('data/State_bnd', 'GU_StateOrTerritory.shp')\nfiona.listlayers(filepath2)\nstate = gpd.read_file(filepath2)\n\nstate.type\nstate.head()\nstate_AZ = state[state['State_Name'] == 'Arizona'][['geometry']]\nstate_df = gpd.GeoDataFrame(state_AZ, columns=['geometry'],\n crs=HUC6.crs)\n\n# plot of Arizona state boundaries\nfig, ax = plt.subplots(figsize=(8, 8))\nstate_df.plot(ax=ax, color='none', edgecolor='Red')\nax.set_title(\"Arizona boundaries\")\nplt.show()\nstate.crs\n\n# 4. 
Lakes in Arizona state\n\nfilepath4 = os.path.join('data/Lakes', 'hydrography_p_lakes_v2.shp')\nprint(os.getcwd())\nprint(filepath4)\nos.path.exists(filepath4)\nlakes = gpd.read_file(filepath4)\n\nlakes.columns\nlakes.shape\n\nlakes.crs\nlake_data = gpd.GeoDataFrame(lakes, crs=HUC6.crs)\nlake_data = lake_data.to_crs(HUC6.crs)\nUS_lakes = lake_data[lake_data['COUNTRY'] == 'USA']\nLake_Mead = US_lakes[US_lakes['NAMEEN'] == 'Lake Mead']\nlake_project = Lake_Mead.to_crs(gages_AZ.crs)\nfig, ax = plt.subplots(figsize=(5, 5))\nlake_project.plot(ax=ax, color='Red')\n\n# %%\n# Plotting everything together\n\n# Re-projecting Verde gage\npoints_project = point_df.to_crs(gages_AZ.crs)\n\n# Plotting gages and Verde gage\nfig, ax = plt.subplots(figsize=(8, 8))\ngages_AZ.plot(column='DRAIN_SQKM', categorical=False,\n legend=True, marker='^', markersize=45, cmap='viridis',\n ax=ax)\nax.set_title(\"Arizona stream gauge drainge area\\n (sq km)\")\npoints_project.plot(ax=ax, color='r', marker='s', markersize=35)\n\nHUC6_project = HUC6.to_crs(gages_AZ.crs)\n\nState_bnd = state_df.to_crs(gages_AZ.crs)\n\nfig, ax = plt.subplots(figsize=(8, 8))\nState_bnd.plot(ax=ax, color='none', edgecolor='Red')\nax.set_title(\"Arizona boundaries\")\nplt.show()\n# %%\n\n# Plotting final all in one\nfig, ax = plt.subplots(figsize=(8, 8))\nHUC6_project.plot(ax=ax, edgecolor='black', alpha=0.5,\n facecolor='cyan', legend=True)\nlake_project.plot(ax=ax, color='blue', edgecolor='magenta', linewidth=4)\nState_bnd.plot(ax=ax, color='yellow', alpha=0.5, edgecolor='r', linewidth=2)\ngages_AZ.plot(column='DRAIN_SQKM', categorical=False,\n legend=True, marker='^', markersize=30, cmap='viridis',\n ax=ax)\npoints_project.plot(ax=ax, color='r', marker='s', markersize=45)\nctx.add_basemap(ax)\nax.set_title(\"Stream guages in Arizona in lower Colorado basin\")\nax.legend(['Stream guages', 'Verde River guage'])\nplt.show()\nfig.savefig(\"Map.png\")\n\n# %%\n", "sub_path": "class_scripts/maps/Narkhede_map.py", "file_name": "Narkhede_map.py", "file_ext": "py", "file_size_in_byte": 4978, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "geopandas.read_file", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "fiona.listlayers", "line_number": 67, 
"usage_type": "call"}, {"api_name": "geopandas.read_file", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 80, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "fiona.listlayers", "line_number": 98, "usage_type": "call"}, {"api_name": "geopandas.read_file", "line_number": 99, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "geopandas.read_file", "line_number": 120, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "contextily.add_basemap", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}]} +{"seq_id": "388052110", "text": "\"\"\"\nECE 4564\nAssignment 1\n\nCreate a Socket Client to Request Data from the Portal\n\"\"\"\n\n__author__ = \"Bryse Flowers\"\n\n# External Includes\nimport argparse # CLI\nimport socket # Network Communication\nimport json # Input Configuration\nimport errno # Error Handling Numbers\nimport sys # Exiting Program with Error Codes\n\n# Internal Includes\nfrom proto.request import Request # Google Protobuf Wrapper\nfrom proto.response import Response\n\ndef main():\n '''Main execution loop for socket client'''\n # Create the argument parser\n parser = argparse.ArgumentParser()\n 
parser.add_argument(\"-v\", \"--verbose\", help=\"Print Debug Output\",\n action=\"store_true\")\n parser.add_argument(\"-p\", \"--port\", help=\"Port Number to Connect To\",\n default=1313, type=int)\n parser.add_argument(\"-i\", \"--ip_address\", help=\"IP Address to Connect To\",\n default=\"omaha.local\", type=str)\n parser.add_argument(\"json_input\", help=\"File to create request from\")\n\n # Parse arguments\n args = parser.parse_args()\n\n # Store Arguments\n verbose = args.verbose\n host = args.ip_address\n port = args.port\n\n if verbose:\n print(args)\n\n # Create request from json file\n try:\n with open(args.json_input, 'r') as infile:\n data = json.load(infile)\n messages = [Request(msg, verbose) for msg in data.get(\"requests\", list())]\n except IOError:\n sys.exit(\"ERROR: IOError Raised on JSON Read\")\n except (TypeError, KeyError, AttributeError, ValueError) as e:\n sys.exit(\"ERROR: Creating Request \" + str(type(e)) + \" -- \" + str(e))\n\n try:\n for msg in messages:\n if verbose:\n print(\"Sending Request:\\n----\\n\" + str(msg) + \"----\")\n sendRequest(host, port, msg.toProto())\n except socket.gaierror:\n sys.exit(\"ERROR: Host Resolution Failed\")\n except socket.error as e:\n if e.errno != errno.ECONNREFUSED:\n raise e\n sys.exit(\"ERROR: Connection Refused\")\n\n\ndef sendRequest(host, port, msg):\n '''Send a request to the portal'''\n size = 1024\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host,port))\n sent_bytes = 0\n while sent_bytes < len(msg):\n sent_bytes += s.send(msg[sent_bytes:])\n data = s.recv(size)\n s.close()\n answer = Response(data)\n print(\"Answer Received:\\n----\\n\" + str(answer) + \"----\\n\\n\")\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "HW1/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 2367, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "json.load", "line_number": 47, "usage_type": "call"}, {"api_name": "proto.request.Request", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 52, "usage_type": "call"}, {"api_name": "socket.gaierror", "line_number": 59, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 60, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 61, "usage_type": "attribute"}, {"api_name": "errno.ECONNREFUSED", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 64, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 70, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 70, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 70, "usage_type": "attribute"}, {"api_name": "proto.response.Response", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "514797151", "text": "import newspaper\r\nimport nltk\r\n\r\nfrom newspaper import Article\r\nfirst_article = Article(url=\"https://share.america.gov/nasa-finds-7-earth-sized-planets-around-star/\", language='en')\r\nfirst_article.download()\r\nfirst_article.parse()\r\nprint(first_article.text)\r\n\r\nsentence = first_article.text\r\ntokens = nltk.word_tokenize(sentence)\r\nprint (tokens)\r\n\r\ntagged = nltk.pos_tag(tokens)\r\nprint (tagged[0:6])\r\n\r\nif tagged == 'NASA': # not right....\r\n print ('science')\r\n", "sub_path": 
"Newspaper_Text.py", "file_name": "Newspaper_Text.py", "file_ext": "py", "file_size_in_byte": 464, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "newspaper.Article", "line_number": 5, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 11, "usage_type": "call"}, {"api_name": "nltk.pos_tag", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "438613753", "text": "import json\n\nimport networkx\n\nfrom rundmcmc.partition import Partition\nfrom rundmcmc.updaters import (boundary_nodes, cut_edges, exterior_boundaries,\n interior_boundaries, perimeters, polsby_popper,\n Tally, cut_edges_by_part)\n\n\nclass GeographicPartition(Partition):\n default_updaters = {\n 'perimeters': perimeters,\n 'exterior_boundaries': exterior_boundaries,\n 'interior_boundaries': interior_boundaries,\n 'boundary_nodes': boundary_nodes,\n 'cut_edges': cut_edges,\n 'areas': Tally('area', alias='areas'),\n 'polsby_popper': polsby_popper,\n 'cut_edges_by_part': cut_edges_by_part\n }\n\n @classmethod\n def from_json_graph(cls, graph_path, assignment):\n with open(graph_path) as f:\n graph_data = json.load(f)\n graph = networkx.readwrite.adjacency_graph(graph_data)\n\n if isinstance(assignment, str):\n assignment = {node: graph.nodes[node][assignment]\n for node in graph.nodes}\n elif not isinstance(assignment, dict):\n raise TypeError(\"Assignment must be a dict or a node attribute key\")\n\n updaters = cls.default_updaters\n return cls(graph, assignment, updaters)\n", "sub_path": "rundmcmc/partition/geographic.py", "file_name": "geographic.py", "file_ext": "py", "file_size_in_byte": 1283, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "rundmcmc.partition.Partition", "line_number": 11, "usage_type": "name"}, {"api_name": "rundmcmc.updaters.perimeters", "line_number": 13, "usage_type": "name"}, {"api_name": "rundmcmc.updaters.exterior_boundaries", "line_number": 14, "usage_type": "name"}, {"api_name": "rundmcmc.updaters.interior_boundaries", "line_number": 15, "usage_type": "name"}, {"api_name": "rundmcmc.updaters.boundary_nodes", "line_number": 16, "usage_type": "name"}, {"api_name": "rundmcmc.updaters.cut_edges", "line_number": 17, "usage_type": "name"}, {"api_name": "rundmcmc.updaters.Tally", "line_number": 18, "usage_type": "call"}, {"api_name": "rundmcmc.updaters.polsby_popper", "line_number": 19, "usage_type": "name"}, {"api_name": "rundmcmc.updaters.cut_edges_by_part", "line_number": 20, "usage_type": "name"}, {"api_name": "json.load", "line_number": 26, "usage_type": "call"}, {"api_name": "networkx.readwrite.adjacency_graph", "line_number": 27, "usage_type": "call"}, {"api_name": "networkx.readwrite", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "607348914", "text": "import pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.patches import Circle, Wedge, Rectangle\nfrom pylab import *\n\n#classe\nclass dataviz:\n\n def __init__(self, background = '#1B1B2F', auxiliary_background ='#22223D', colors = ['#F54291','#2AD5F5','#F5E55B','#A81D59','#2594A8', '#A89914'], color_labels = '#FFFFFF', palette = 'RdPu'):\n self.background = background\n self.auxiliary_background = auxiliary_background\n self.colors = colors\n self.color_labels = color_labels\n self.palette = palette\n\n def generates_figure(self, axes = ['bottom', 'left', 'right', 'bottom'], axes_labels = ['x', 
'y'], grid = True):\n \"\"\"Generates the figure, axes and grids.\n \n Parameters\n ----------\n axes : list\n Axes list\n axes_labels : list\n Axes labels list\n grid : boolean\n Grid flag\n Returns\n -------\n matplotlib.figure.Figure\n matplotlib.axes._subplots.AxesSubplot\n \"\"\"\n\n # generates the figure and axes\n fig, ax = plt.subplots(facecolor = self.background)\n # set background color\n ax = plt.gca()\n ax.set_facecolor(self.background)\n\n all_axes = ['bottom', 'left', 'right', 'top']\n delete_axes = set(all_axes) - set(axes)\n all_axes_labels = ['x', 'y']\n delete_axes_labels = set(all_axes_labels) - set(axes_labels)\n\n # axes configuration\n # removes the axes that were not requested\n for param in delete_axes:\n ax.spines[param].set_visible(False)\n # changes the color of the active axes\n for param in axes:\n ax.spines[param].set_color(self.color_labels)\n # changes the color of the labels \n for i in axes_labels:\n ax.tick_params(axis = i, colors = self.color_labels)\n # hides the labels of the unused axes by painting them in the background color\n for i in delete_axes_labels:\n ax.tick_params(axis = i, colors = self.background)\n \n # grid configuration\n if grid:\n # add the grids\n plt.grid(color = self.color_labels, linestyle = ':', linewidth = 2, alpha = 0.1)\n\n return fig, ax\n\n def line_chart(self, x, y, legend = None, axes = ['bottom', 'left'], axes_labels = ['x','y'], grid = True, fname = None):\n \"\"\"Plots line graph with n lines.\n \n Parameters\n ----------\n x : list\n List with x values\n y : list\n List with y values\n legend : list\n List with the legends \n axes : list\n Axes list\n axes_labels : list\n Axes labels list\n grid : boolean\n Grid flag\n \"\"\"\n\n fig, ax = self.generates_figure(axes = axes, grid = grid, axes_labels= axes_labels)\n\n # plots the lines\n for i in range(0, len(y)):\n plt.plot(x[i],y[i], color = self.colors[i])\n \n # definition of shadow resources\n n_shades = 10\n diff_linewidth = 1.0\n alpha_value = 0.4 / n_shades\n\n # generates the neon effect\n for i in range(0, len(x)):\n for n in range(1, n_shades+1):\n plt.plot(x[i],y[i], linewidth = 2+(diff_linewidth*n), alpha = alpha_value, color = self.colors[i])\n\n # generates the shadow below the lines\n for i in range(0, len(x)):\n ax.fill_between(x = x[i], y1 = y[i],y2 = np.array(y[i]).min()+ 0.2*np.array(y[i]).min(), color = self.colors[i], alpha = 0.08)\n\n # generates the legend\n if legend == None:\n aux_legend = []\n for i in range(0, len(x)):\n aux_legend.append(f'line {i+1}')\n leg = ax.legend(aux_legend, frameon = False)\n else:\n leg = ax.legend(legend, frameon = False)\n\n # changes the legend color\n for text in leg.get_texts():\n plt.setp(text, color = self.color_labels)\n\n # set x and y limits\n minn = np.array(x).min()\n maxx = np.array(x).max()\n plt.xlim(minn,maxx)\n\n minn = np.array(y).min() + 0.2*np.array(y).min()\n maxx = np.array(y).max() + 0.2*np.array(y).max()\n plt.ylim(minn,maxx)\n\n if(fname != None):\n plt.savefig(fname, transparent = True)\n return fig \n\n 
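# Example usage (hypothetical data; not part of the original module):\n #   dv = dataviz()\n #   fig = dv.line_chart([[0, 1, 2]], [[3.0, 1.5, 2.2]], legend=['series 1'])\n #   fig.savefig('line_chart.png')\n\n 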
def bar_chart(self, labels, values, legend = None, axes = [], axes_labels = ['x'], grid = False, fname = None):\n \"\"\"Plots bar chart with n groups.\n \n Parameters\n ----------\n labels : list\n List(s) with the groups.\n values : list\n List(s) with the quantities per group.\n legend : list\n List with the legends\n axes : list\n Axes list\n axes_labels : list\n Axes labels list\n grid : boolean\n Grid flag\n \"\"\"\n\n fig, ax = self.generates_figure(axes = axes, grid = grid, axes_labels= axes_labels)\n\n # set the width of the bars\n width = 0.8/len(values)\n x = np.arange(len(labels))\n\n # checks the position of the bars\n if len(values)%2 != 0:\n aux_x = np.arange(-len(values)+len(values)/2,len(values)-len(values)/2, dtype = float) * width\n else:\n aux_x = np.arange(-len(values)/2,(len(values))/2, dtype = float)* width\n\n # plots the bars\n for i in range(0, len(values)):\n bar = ax.bar(x+aux_x[i], values[i], width, color = self.colors[i], linewidth = 0.5)\n \n # Set the labels\n ax.set_xticks(x-width/2)\n ax.set_xticklabels(labels)\n\n # generates the legend (one entry per bar series)\n if legend == None:\n aux_legend = []\n for i in range(0, len(values)):\n aux_legend.append(f'label {i+1}')\n leg = ax.legend(aux_legend, frameon = False)\n else:\n leg = ax.legend(legend, frameon = False)\n \n # changes the legends color\n for text in leg.get_texts():\n plt.setp(text, color = self.color_labels)\n \n # set y limits\n minn = np.array(values).min()\n if minn > 0:\n minn = 0\n else:\n minn = np.array(values).min() - 0.2*np.array(values).min() \n\n maxx = np.array(values).max() + 0.2*np.array(values).max() \n plt.ylim(minn,maxx)\n\n if(fname != None):\n plt.savefig(fname, transparent = True)\n return fig \n\n def progress_chart(self, value, circles = 4, fname = None):\n \"\"\"Plots progress (donut) chart.\n \n Parameters\n ----------\n value : float\n Gauge value\n circles : int\n Number of circles\n \"\"\"\n\n # generates the figure\n fig, ax = plt.subplots(facecolor = self.background)\n ax = plt.gca()\n ax.set_facecolor(self.background)\n size = 0.1\n\n # circles configuration\n startingRadius = 0.7 + (0.2 * circles)\n percentage = value\n remainingPie = 100-value\n donut_sizes = [percentage, remainingPie]\n\n # plots the pies\n for i in range(1,circles + 1):\n plt.pie(donut_sizes, radius = startingRadius, startangle = 90, colors = [self.colors[i%2],self.auxiliary_background], wedgeprops ={'edgecolor': self.background, 'linewidth': 4})\n startingRadius -= 0.12\n percentage *= 0.5\n remainingPie *= 1\n donut_sizes = [percentage, remainingPie]\n\n # plots the circle in the center\n circle = plt.Circle((0,0), startingRadius, color = self.background)\n p = plt.gcf()\n p.gca().add_artist(circle)\n ax.annotate(f'{value} %', (0,0), fontsize = 36, color = self.color_labels, va = 'center', ha = 'center', family = 'monospace')\n\n if(fname != None):\n plt.savefig(fname, transparent = True)\n return fig \n \n def horizontal_bar_chart(self, labels, values, fname = None):\n \"\"\"Plots horizontal bar with n groups.\n \n Parameters\n ----------\n labels : list\n Groups list\n values : list\n Values list\n \"\"\"\n\n # generates the figure\n fig, ax = plt.subplots(facecolor = self.background, figsize =(7, len(labels)))\n ax = plt.gca()\n ax.set_facecolor(self.background)\n\n # remove the axes\n for param in ['top', 'right', 'bottom', 'left']:\n ax.spines[param].set_visible(False)\n # change the color of the axes label\n for i in ['x','y']:\n ax.tick_params(axis = i, colors = self.color_labels)\n \n # remove labels of x axis\n ax.set_xticks([])\n \n # define the max value in the list\n maxx = 0\n for i in values:\n if i > maxx:\n maxx = i\n category = []\n\n # generates the labels of the y axis\n for x in range(0,len(labels)):\n category.append(f'{labels[x]} - {values[x]}')\n\n # plot the background bar\n ax.barh(category, maxx + maxx/2, color = self.auxiliary_background , height = 0.2)\n # plot the bars\n ax.barh(category, values, color = self.colors, height = 0.2)\n\n if(fname != None):\n plt.savefig(fname, transparent = True)\n return fig \n\n 
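# Example usage (hypothetical value; not part of the original module):\n #   dv = dataviz()\n #   dv.gauge(72.5, title='CPU load')\n def gauge(self, value, title = '', fname 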
= None):\n \"\"\"Plots gauge chart.\n \n Parameters\n ----------\n value : float\n Gauge value in percent (0-100)\n title : str\n gauge title\n \"\"\"\n\n # generates the figure\n fig, ax = plt.subplots(facecolor = self.background)\n ax = plt.gca()\n ax.set_facecolor(self.background)\n\n # generates color list from the palette\n cmap = cm.get_cmap(self.palette, 540)\n # get hex format colors\n colors = []\n for i in range(cmap.N):\n rgb = cmap(i)[:3]\n colors.append(matplotlib.colors.rgb2hex(rgb))\n \n # cuts the palette color in half, keeping the upper part\n colors = colors[270:]\n colors = colors[::-1]\n value_ = str(value) + ' %'\n value = 225 - 2.7*value\n\n # create the unit arches\n start = np.linspace(-45,225,270, endpoint = True)[0:-1]\n end = np.linspace(-45,225,270, endpoint = True)[1::]\n ang_range = np.c_[start,end]\n\n # create the arches\n patches = []\n for ang, c in zip(ang_range, colors):\n patches.append(Wedge((0.,0.), .4, *ang, facecolor = self.background, lw = 2))\n if ang.max() < value:\n patches.append(Wedge((0.,0.), .4, *ang, width = 0.07, facecolor = self.auxiliary_background, lw = 2))\n else:\n patches.append(Wedge((0.,0.), .4, *ang, width = 0.07, facecolor = c, lw = 2))\n\n # plots the arches\n [ax.add_patch(p) for p in patches]\n\n # add the minimum value\n ax.text(0.42 * np.cos(np.radians(225)), 0.42* np.sin(np.radians(225)), 0, horizontalalignment = 'center', verticalalignment = 'center', fontsize = 11, fontweight = 'ultralight', color = self.color_labels, rotation = np.degrees(np.radians(225) * np.pi / np.pi - np.radians(90)))\n\n # add the maximum value\n ax.text(0.42 * np.cos(np.radians(315)), 0.42* np.sin(np.radians(315)), 100, horizontalalignment = 'center', verticalalignment = 'center', fontsize = 11, fontweight = 'ultralight', color = self.color_labels, rotation = np.degrees(np.radians(315) * np.pi / np.pi - np.radians(90)))\n \n # writes the title\n ax.text(0, -0.38, title, horizontalalignment = 'center', verticalalignment = 'center', fontsize = 12, fontweight = 'bold', color = self.color_labels, fontfamily = 'sans-serif')\n\n # writes the value\n ax.text(0, -.3, value_, horizontalalignment = 'center', verticalalignment = 'center', fontsize = 32, fontweight = 'book', color = self.color_labels, fontfamily = 'sans-serif')\n\n # draw the arrow\n ax.arrow(0, 0, 0.32 * np.cos(np.radians(value)), 0.32 * np.sin(np.radians(value)),width=0.01, head_width=0.01, head_length=0.1, fc=self.color_labels, ec=self.color_labels, fill = True)\n \n # draw the circle inside the arrow\n ax.add_patch(Circle((0, 0), radius = 0.02, facecolor = self.color_labels))\n ax.add_patch(Circle((0, 0), radius = 0.01, facecolor = self.auxiliary_background, zorder = 12))\n\n # configure the display area\n ax.set_frame_on(False)\n ax.axes.set_xticks([])\n ax.axes.set_yticks([])\n ax.axis('equal')\n plt.tight_layout()\n\n if(fname != None):\n plt.savefig(fname, transparent = True)\n return fig ", "sub_path": "Projects/DataViz/pystaticplot/pystaticplot.py", "file_name": "pystaticplot.py", "file_ext": "py", "file_size_in_byte": 12690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 63, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pie", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 298, "usage_type": "name"}, {"api_name": "matplotlib.cm.get_cmap", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 302, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colors.rgb2hex", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colors", "line_number": 307, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 307, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 318, "usage_type": "attribute"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 323, "usage_type": "call"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.patches.Wedge", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 333, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 336, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 348, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 349, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 356, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 356, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 359, "usage_type": "name"}]} +{"seq_id": "533201914", "text": "from bs4 import BeautifulSoup\nimport requests\nimport re\nimport os\nimport json\n \n\nartworks_list = []\ndeleted_imgs = []\ncounter = 1\n\ndef main():\n global artworks_list\n with open(\"deleted_imgs.txt\") as in_file:\n for line in in_file:\n img = line.strip(\"\\n\")\n deleted_imgs.append(img)\n\n with open(\"artist-list.txt\") as in_file:\n for line in in_file:\n artist = line.strip(\"\\n\")\n get_artworks(artist)\n artworks = remove_deleted_images(artworks_list)\n export_artworks_as_json(artworks)\n\n \n\ndef get_artworks(artist):\n global counter\n # acceptable file extensions\n file_extensions = ['jpg', 'jpeg', 'png']\n r = requests.get('https://en.wikipedia.org/wiki/' + artist)\n 
soup = BeautifulSoup(r.text, \"html.parser\")\r\n # get all anchor tags with class=\"image\"\r\n anchor_tags = soup.select(\"a[class=image]\")\r\n for a in anchor_tags:\r\n artwork_dict = {}\r\n # get src from image tag nested inside anchor tags\r\n src = a.select(\"img\")[0].get('src')\r\n # resize all images 650px\r\n src = re.sub(r'[0-9]*px', \"650px\", src).strip(\"//\")\r\n href = a.get('href')\r\n if href.rsplit('.', 1)[-1].lower() in file_extensions: # compare the full extension so 'jpeg' also matches\r\n title = get_img_title(href)\r\n if title != None:\r\n n = 4 - len(str(counter))\r\n img_id = 'img' + n*str(0) + str(counter)\r\n artwork_dict[\"img_id\"] = img_id\r\n artwork_dict[\"title\"] = title\r\n artwork_dict[\"artist\"] = artist\r\n artwork_dict[\"level\"] = \"hard\"\r\n artworks_list.append(artwork_dict)\r\n print(img_id + \" added to artworks_list\")\r\n counter += 1\r\n #download_img(img_id, src)\r\n\r\ndef export_artworks_as_json(a):\r\n artwork_json = json.dumps(a, ensure_ascii=False).encode('utf8')\r\n outfile = open('artworks.json', 'wb+')\r\n outfile.write(artwork_json)\r\n print(\"SUCCESS\")\r\n outfile.close()\r\n\r\ndef remove_deleted_images(a):\r\n global artworks_list\r\n artworks = []\r\n for img in a: \r\n img_id = img['img_id']\r\n if img_id not in deleted_imgs:\r\n artworks.append(img)\r\n return artworks\r\n\r\n\r\n# get title of image from wikipedia\r\ndef get_img_title(href):\r\n r = requests.get('https://en.wikipedia.org' + href)\r\n soup = BeautifulSoup(r.text, \"html.parser\") \r\n title_elt = soup.select(\"td[id=fileinfotpl_art_title] + td > span\")\r\n #artist_elt = soup.select(\"td[id=fileinfotpl_aut] + td > div > table > tr > th > span\")\r\n if len(title_elt) > 0:\r\n return title_elt[0].text\r\n\r\n# downloads image from url and stores in artworks folder \r\ndef download_img(img_id, url):\r\n img_data = requests.get('http://' + url).content\r\n img_path = './artworks2/' + img_id + '.jpg'\r\n with open(img_path, 'wb+') as handler:\r\n handler.write(img_data)\r\n print(img_id + \" successfully downloaded\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()", "sub_path": "wiki-scraper/wiki-scraper.py", "file_name": "wiki-scraper.py", "file_ext": "py", "file_size_in_byte": 2917, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 32, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 33, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 58, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 76, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 77, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "564617318", "text": "from django.core.management.base import BaseCommand\n\n\n# To add a custom command to python manage.py, inherit from BaseCommand\nclass Command(BaseCommand):\n\n # 'help' sets the description shown when the command is run with --help\n help = \"This is my first Custom Command!\"\n\n # To pass variables to the command, arguments must be registered on the parser via add_arguments.\n def add_arguments(self, parser):\n parser.add_argument(\"--times\", help=\"How many times print 'FirstCommand!'?\")\n return super().add_arguments(parser)\n\n # The part that actually runs the command\n def handle(self, *args, **options):\n # Variables can be retrieved through options.\n time = options.get(\"times\")\n if time is not None:\n for k in range(int(time)):\n self.stdout.write(self.style.SUCCESS(\"FirstCommand!\"))\n else:\n self.stdout.write(self.style.ERROR(\"FirstCommand!\"))\n
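\n# Example invocation (assuming the app providing this command is in INSTALLED_APPS):\n#   python manage.py firstcommand --times 3\n", "sub_path": "rooms/management/commands/firstcommand.py", 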
"file_name": "firstcommand.py", "file_ext": "py", "file_size_in_byte": 1034, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "296075441", "text": "from django import forms\nfrom blog.models import Comment\n\n\nclass CommentForm(forms.ModelForm):\n content = forms.CharField(widget=forms.Textarea(attrs={'cols': 70, 'rows': 10}), help_text=\"Comment\")\n username = forms.CharField(max_length=30, help_text=\"Your Username\")\n email = forms.CharField(max_length=100, help_text=\"Your Email Address\")\n\n class Meta:\n model = Comment\n fields = ['content', 'username', 'email']\n", "sub_path": "CS2610-WebDev/cs2610proj/src/blog/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 442, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.forms.ModelForm", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}, {"api_name": "blog.models.Comment", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "357540272", "text": "from django.forms import ModelForm\nfrom django.forms.models import inlineformset_factory\n\n# from crispy_forms.helper import FormHelper\n# from crispy_forms.layout import Layout, Fieldset\n\nfrom .models import Sales, SalesItem\n\nclass SalesForm(ModelForm):\n class Meta:\n model = Sales\n fields = ('sales_branch','name_of_customer','name_of_seller')\n\nclass SalesItemForm(ModelForm):\n class Meta:\n models = SalesItem\n fields = '__all__'\n\n\nSalesItemFormset = inlineformset_factory(\n Sales,\n SalesItem,\n fields=('sales','item_description','quantity','price', ),\n extra=2,\n can_delete=False,\n)\n\n", "sub_path": "sales/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 634, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.forms.ModelForm", "line_number": 9, "usage_type": "name"}, {"api_name": "models.Sales", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 14, "usage_type": "name"}, {"api_name": "models.SalesItem", "line_number": 16, "usage_type": "name"}, {"api_name": "django.forms.models.inlineformset_factory", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Sales", "line_number": 21, "usage_type": "argument"}, {"api_name": "models.SalesItem", "line_number": 22, "usage_type": "argument"}]} +{"seq_id": "89689434", "text": "\"\"\"entries table\n\nRevision ID: 0751becb2c1f\nRevises: 13316e9313bb\nCreate Date: 2022-10-04 21:50:50.856467\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0751becb2c1f'\ndown_revision = '13316e9313bb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### 
commands auto generated by Alembic - please adjust! ###\n op.drop_index('ix_entry_description', table_name='entry')\n op.drop_index('ix_entry_title', table_name='entry')\n op.drop_table('entry')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('entry',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('title', sa.VARCHAR(length=64), nullable=False),\n sa.Column('description', sa.VARCHAR(length=120), nullable=False),\n sa.Column('status', sa.BOOLEAN(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index('ix_entry_title', 'entry', ['title'], unique=False)\n op.create_index('ix_entry_description', 'entry', ['description'], unique=False)\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/0751becb2c1f_entries_table.py", "file_name": "0751becb2c1f_entries_table.py", "file_ext": "py", "file_size_in_byte": 1143, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "alembic.op.drop_index", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.drop_index", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.create_table", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.BOOLEAN", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op.create_index", "line_number": 36, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 36, "usage_type": "name"}, {"api_name": "alembic.op.create_index", "line_number": 37, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "357021343", "text": "\"\"\"\nSchemas for special responses from the Orion API.\n\"\"\"\n\nimport datetime\nfrom typing import List\n\nfrom pydantic import Field\nfrom typing_extensions import Literal\n\nimport prefect.orion.schemas as schemas\nfrom prefect.orion.utilities.schemas import PrefectBaseModel\nfrom prefect.utilities.enum import AutoEnum\n\n\nclass SetStateStatus(AutoEnum):\n \"\"\"Enumerates return statuses for setting run states.\"\"\"\n\n ACCEPT = AutoEnum.auto()\n REJECT = AutoEnum.auto()\n ABORT = AutoEnum.auto()\n WAIT = AutoEnum.auto()\n\n\nclass StateAcceptDetails(PrefectBaseModel):\n \"\"\"Details associated with an ACCEPT state transition.\"\"\"\n\n type: Literal[\"accept_details\"] = Field(\n \"accept_details\",\n description=\"The type of state transition detail. 
Used to ensure pydantic does not coerce into a different type.\",\n )\n\n\nclass StateRejectDetails(PrefectBaseModel):\n \"\"\"Details associated with a REJECT state transition.\"\"\"\n\n type: Literal[\"reject_details\"] = Field(\n \"reject_details\",\n description=\"The type of state transition detail. Used to ensure pydantic does not coerce into a different type.\",\n )\n reason: str = Field(\n None, description=\"The reason why the state transition was rejected.\"\n )\n\n\nclass StateAbortDetails(PrefectBaseModel):\n \"\"\"Details associated with an ABORT state transition.\"\"\"\n\n type: Literal[\"abort_details\"] = Field(\n \"abort_details\",\n description=\"The type of state transition detail. Used to ensure pydantic does not coerce into a different type.\",\n )\n reason: str = Field(\n None, description=\"The reason why the state transition was aborted.\"\n )\n\n\nclass StateWaitDetails(PrefectBaseModel):\n \"\"\"Details associated with a WAIT state transition.\"\"\"\n\n type: Literal[\"wait_details\"] = Field(\n \"wait_details\",\n description=\"The type of state transition detail. Used to ensure pydantic does not coerce into a different type.\",\n )\n delay_seconds: int = Field(\n ...,\n description=\"The length of time in seconds the client should wait before transitioning states.\",\n )\n reason: str = Field(\n None, description=\"The reason why the state transition should wait.\"\n )\n\n\nclass HistoryResponseState(PrefectBaseModel):\n \"\"\"Represents a single state's history over an interval.\"\"\"\n\n state_type: schemas.states.StateType = Field(..., description=\"The state type.\")\n state_name: str = Field(..., description=\"The state name.\")\n count_runs: int = Field(\n ...,\n description=\"The number of runs in the specified state during the interval.\",\n )\n sum_estimated_run_time: datetime.timedelta = Field(\n ..., description=\"The total estimated run time of all runs during the interval.\"\n )\n sum_estimated_lateness: datetime.timedelta = Field(\n ...,\n description=\"The sum of differences between actual and expected start time during the interval.\",\n )\n\n\nclass HistoryResponse(PrefectBaseModel):\n \"\"\"Represents a history of aggregation states over an interval\"\"\"\n\n interval_start: datetime.datetime = Field(\n ..., description=\"The start date of the interval.\"\n )\n interval_end: datetime.datetime = Field(\n ..., description=\"The end date of the interval.\"\n )\n states: List[HistoryResponseState] = Field(\n ..., description=\"A list of state histories during the interval.\"\n )\n", "sub_path": "src/prefect/orion/schemas/responses.py", "file_name": "responses.py", "file_ext": "py", "file_size_in_byte": 3413, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "prefect.utilities.enum.AutoEnum", "line_number": 16, "usage_type": "name"}, {"api_name": "prefect.utilities.enum.AutoEnum.auto", "line_number": 19, "usage_type": "call"}, {"api_name": "prefect.utilities.enum.AutoEnum", "line_number": 19, "usage_type": "name"}, {"api_name": "prefect.utilities.enum.AutoEnum.auto", "line_number": 20, "usage_type": "call"}, {"api_name": "prefect.utilities.enum.AutoEnum", "line_number": 20, "usage_type": "name"}, {"api_name": "prefect.utilities.enum.AutoEnum.auto", "line_number": 21, "usage_type": "call"}, {"api_name": "prefect.utilities.enum.AutoEnum", "line_number": 21, "usage_type": "name"}, {"api_name": "prefect.utilities.enum.AutoEnum.auto", "line_number": 22, "usage_type": "call"}, 
{"api_name": "prefect.utilities.enum.AutoEnum", "line_number": 22, "usage_type": "name"}, {"api_name": "prefect.orion.utilities.schemas.PrefectBaseModel", "line_number": 25, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 28, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 28, "usage_type": "call"}, {"api_name": "prefect.orion.utilities.schemas.PrefectBaseModel", "line_number": 34, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 37, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 37, "usage_type": "call"}, {"api_name": "pydantic.Field", "line_number": 41, "usage_type": "call"}, {"api_name": "prefect.orion.utilities.schemas.PrefectBaseModel", "line_number": 46, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 49, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 49, "usage_type": "call"}, {"api_name": "pydantic.Field", "line_number": 53, "usage_type": "call"}, {"api_name": "prefect.orion.utilities.schemas.PrefectBaseModel", "line_number": 58, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 61, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 61, "usage_type": "call"}, {"api_name": "pydantic.Field", "line_number": 65, "usage_type": "call"}, {"api_name": "pydantic.Field", "line_number": 69, "usage_type": "call"}, {"api_name": "prefect.orion.utilities.schemas.PrefectBaseModel", "line_number": 74, "usage_type": "name"}, {"api_name": "prefect.orion.schemas.states", "line_number": 77, "usage_type": "attribute"}, {"api_name": "prefect.orion.schemas", "line_number": 77, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 77, "usage_type": "call"}, {"api_name": "pydantic.Field", "line_number": 78, "usage_type": "call"}, {"api_name": "pydantic.Field", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pydantic.Field", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pydantic.Field", "line_number": 86, "usage_type": "call"}, {"api_name": "prefect.orion.utilities.schemas.PrefectBaseModel", "line_number": 92, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pydantic.Field", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pydantic.Field", "line_number": 98, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 101, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "276778778", "text": "import pytest\nfrom hexbytes import HexBytes\n\nfrom vyper.builtins.functions import eip1167_bytecode\nfrom vyper.exceptions import ArgumentException, InvalidType, StateAccessViolation\n\npytestmark = pytest.mark.usefixtures(\"memory_mocker\")\n\n\ndef test_max_outsize_exceeds_returndatasize(get_contract):\n source_code = \"\"\"\n@external\ndef foo() -> Bytes[7]:\n return raw_call(0x0000000000000000000000000000000000000004, b\"moose\", max_outsize=7)\n \"\"\"\n c = get_contract(source_code)\n assert c.foo() == b\"moose\"\n\n\ndef test_raw_call_non_memory(get_contract):\n source_code = \"\"\"\n_foo: Bytes[5]\n@external\ndef foo() -> Bytes[5]:\n self._foo = b\"moose\"\n return 
raw_call(0x0000000000000000000000000000000000000004, self._foo, max_outsize=5)\n \"\"\"\n c = get_contract(source_code)\n assert c.foo() == b\"moose\"\n\n\ndef test_returndatasize_exceeds_max_outsize(get_contract):\n source_code = \"\"\"\n@external\ndef foo() -> Bytes[3]:\n return raw_call(0x0000000000000000000000000000000000000004, b\"moose\", max_outsize=3)\n \"\"\"\n c = get_contract(source_code)\n assert c.foo() == b\"moo\"\n\n\ndef test_returndatasize_matches_max_outsize(get_contract):\n source_code = \"\"\"\n@external\ndef foo() -> Bytes[5]:\n return raw_call(0x0000000000000000000000000000000000000004, b\"moose\", max_outsize=5)\n \"\"\"\n c = get_contract(source_code)\n assert c.foo() == b\"moose\"\n\n\ndef test_multiple_levels(w3, get_contract_with_gas_estimation):\n inner_code = \"\"\"\n@external\ndef returnten() -> int128:\n return 10\n \"\"\"\n\n c = get_contract_with_gas_estimation(inner_code)\n\n outer_code = \"\"\"\n@external\ndef create_and_call_returnten(inp: address) -> int128:\n x: address = create_minimal_proxy_to(inp)\n o: int128 = extract32(raw_call(x, b\"\\\\xd0\\\\x1f\\\\xb1\\\\xb8\", max_outsize=32, gas=50000), 0, output_type=int128) # noqa: E501\n return o\n\n@external\ndef create_and_return_proxy(inp: address) -> address:\n x: address = create_minimal_proxy_to(inp)\n return x\n \"\"\"\n\n c2 = get_contract_with_gas_estimation(outer_code)\n assert c2.create_and_call_returnten(c.address) == 10\n c2.create_and_call_returnten(c.address, transact={})\n\n _, preamble, callcode = eip1167_bytecode()\n\n c3 = c2.create_and_return_proxy(c.address, call={})\n c2.create_and_return_proxy(c.address, transact={})\n\n c3_contract_code = w3.to_bytes(w3.eth.get_code(c3))\n\n assert c3_contract_code[:10] == HexBytes(preamble)\n assert c3_contract_code[-15:] == HexBytes(callcode)\n\n print(\"Passed proxy test\")\n # TODO: This one is special\n # print(f'Gas consumed: {(chain.head_state.receipts[-1].gas_used - chain.head_state.receipts[-2].gas_used - chain.last_tx.intrinsic_gas_used)}') # noqa: E501\n\n\ndef test_multiple_levels2(assert_tx_failed, get_contract_with_gas_estimation):\n inner_code = \"\"\"\n@external\ndef returnten() -> int128:\n raise\n \"\"\"\n\n c = get_contract_with_gas_estimation(inner_code)\n\n outer_code = \"\"\"\n@external\ndef create_and_call_returnten(inp: address) -> int128:\n x: address = create_minimal_proxy_to(inp)\n o: int128 = extract32(raw_call(x, b\"\\\\xd0\\\\x1f\\\\xb1\\\\xb8\", max_outsize=32, gas=50000), 0, output_type=int128) # noqa: E501\n return o\n\n@external\ndef create_and_return_proxy(inp: address) -> address:\n return create_minimal_proxy_to(inp)\n \"\"\"\n\n c2 = get_contract_with_gas_estimation(outer_code)\n\n assert_tx_failed(lambda: c2.create_and_call_returnten(c.address))\n\n print(\"Passed minimal proxy exception test\")\n\n\ndef test_delegate_call(w3, get_contract):\n inner_code = \"\"\"\na: address # this is required for storage alignment...\nowners: public(address[5])\n\n@external\ndef set_owner(i: int128, o: address):\n self.owners[i] = o\n \"\"\"\n\n inner_contract = get_contract(inner_code)\n\n outer_code = \"\"\"\nowner_setter_contract: public(address)\nowners: public(address[5])\n\n\n@external\ndef __init__(_owner_setter: address):\n self.owner_setter_contract = _owner_setter\n\n\n@external\ndef set(i: int128, owner: address):\n # delegate setting owners to other contract.s\n cdata: Bytes[68] = concat(method_id(\"set_owner(int128,address)\"), convert(i, bytes32), convert(owner, bytes32)) # noqa: E501\n raw_call(\n 
self.owner_setter_contract,\n cdata,\n gas=msg.gas,\n max_outsize=0,\n is_delegate_call=True\n )\n \"\"\"\n\n a0, a1, a2 = w3.eth.accounts[:3]\n outer_contract = get_contract(outer_code, *[inner_contract.address])\n\n # Test setting on inners contract's state setting works.\n inner_contract.set_owner(1, a2, transact={})\n assert inner_contract.owners(1) == a2\n\n # Confirm outer contract's state is empty and contract to call has been set.\n assert outer_contract.owner_setter_contract() == inner_contract.address\n assert outer_contract.owners(1) is None\n\n # Call outer contract, that make a delegate call to inner_contract.\n tx_hash = outer_contract.set(1, a1, transact={})\n assert w3.eth.get_transaction_receipt(tx_hash)[\"status\"] == 1\n assert outer_contract.owners(1) == a1\n\n\ndef test_gas(get_contract, assert_tx_failed):\n inner_code = \"\"\"\nbar: bytes32\n\n@external\ndef foo(_bar: bytes32):\n self.bar = _bar\n \"\"\"\n\n inner_contract = get_contract(inner_code)\n\n outer_code = \"\"\"\n@external\ndef foo_call(_addr: address):\n cdata: Bytes[40] = concat(\n method_id(\"foo(bytes32)\"),\n 0x0000000000000000000000000000000000000000000000000000000000000001\n )\n raw_call(_addr, cdata, max_outsize=0{})\n \"\"\"\n\n # with no gas value given, enough will be forwarded to complete the call\n outer_contract = get_contract(outer_code.format(\"\"))\n outer_contract.foo_call(inner_contract.address)\n\n # manually specifying a sufficient amount should succeed\n outer_contract = get_contract(outer_code.format(\", gas=50000\"))\n outer_contract.foo_call(inner_contract.address)\n\n # manually specifying an insufficient amount should fail\n outer_contract = get_contract(outer_code.format(\", gas=15000\"))\n assert_tx_failed(lambda: outer_contract.foo_call(inner_contract.address))\n\n\ndef test_static_call(get_contract):\n target_source = \"\"\"\n@external\n@view\ndef foo() -> int128:\n return 42\n\"\"\"\n\n caller_source = \"\"\"\n@external\n@view\ndef foo(_addr: address) -> int128:\n _response: Bytes[32] = raw_call(\n _addr,\n method_id(\"foo()\"),\n max_outsize=32,\n is_static_call=True,\n )\n return convert(_response, int128)\n \"\"\"\n\n target = get_contract(target_source)\n caller = get_contract(caller_source)\n\n assert caller.foo(target.address) == 42\n\n\ndef test_forward_calldata(get_contract, w3, keccak):\n target_source = \"\"\"\n@external\ndef foo() -> uint256:\n return 123\n \"\"\"\n\n caller_source = \"\"\"\ntarget: address\n\n@external\ndef set_target(target: address):\n self.target = target\n\n@external\ndef __default__():\n assert 123 == _abi_decode(raw_call(self.target, msg.data, max_outsize=32), uint256)\n \"\"\"\n\n target = get_contract(target_source)\n\n caller = get_contract(caller_source)\n caller.set_target(target.address, transact={})\n\n # manually construct msg.data for `caller` contract\n sig = keccak(\"foo()\".encode()).hex()[:10]\n w3.eth.send_transaction({\"to\": caller.address, \"data\": sig})\n\n\ndef test_static_call_fails_nonpayable(get_contract, assert_tx_failed):\n target_source = \"\"\"\nbaz: int128\n\n@external\ndef foo() -> int128:\n self.baz = 31337\n return self.baz\n\"\"\"\n\n caller_source = \"\"\"\n@external\n@view\ndef foo(_addr: address) -> int128:\n _response: Bytes[32] = raw_call(\n _addr,\n method_id(\"foo()\"),\n max_outsize=32,\n is_static_call=True,\n )\n return convert(_response, int128)\n \"\"\"\n\n target = get_contract(target_source)\n caller = get_contract(caller_source)\n\n assert_tx_failed(lambda: caller.foo(target.address))\n\n\ndef 
test_checkable_raw_call(get_contract, assert_tx_failed):\n target_source = \"\"\"\nbaz: int128\n@external\ndef fail1(should_raise: bool):\n if should_raise:\n raise \"fail\"\n\n# test both paths for raw_call -\n# they are different depending if callee has or doesn't have returntype\n# (fail2 fails because of staticcall)\n@external\ndef fail2(should_raise: bool) -> int128:\n if should_raise:\n self.baz = self.baz + 1\n return self.baz\n\"\"\"\n\n caller_source = \"\"\"\n@external\n@view\ndef foo(_addr: address, should_raise: bool) -> uint256:\n success: bool = True\n response: Bytes[32] = b\"\"\n success, response = raw_call(\n _addr,\n _abi_encode(should_raise, method_id=method_id(\"fail1(bool)\")),\n max_outsize=32,\n is_static_call=True,\n revert_on_failure=False,\n )\n assert success == (not should_raise)\n return 1\n\n@external\n@view\ndef bar(_addr: address, should_raise: bool) -> uint256:\n success: bool = True\n response: Bytes[32] = b\"\"\n success, response = raw_call(\n _addr,\n _abi_encode(should_raise, method_id=method_id(\"fail2(bool)\")),\n max_outsize=32,\n is_static_call=True,\n revert_on_failure=False,\n )\n assert success == (not should_raise)\n return 2\n\n# test max_outsize not set case\n@external\n@nonpayable\ndef baz(_addr: address, should_raise: bool) -> uint256:\n success: bool = True\n success = raw_call(\n _addr,\n _abi_encode(should_raise, method_id=method_id(\"fail1(bool)\")),\n revert_on_failure=False,\n )\n assert success == (not should_raise)\n return 3\n \"\"\"\n\n target = get_contract(target_source)\n caller = get_contract(caller_source)\n\n assert caller.foo(target.address, True) == 1\n assert caller.foo(target.address, False) == 1\n assert caller.bar(target.address, True) == 2\n assert caller.bar(target.address, False) == 2\n assert caller.baz(target.address, True) == 3\n assert caller.baz(target.address, False) == 3\n\n\nuncompilable_code = [\n (\n \"\"\"\n@external\n@view\ndef foo(_addr: address):\n raw_call(_addr, method_id(\"foo()\"))\n \"\"\",\n StateAccessViolation,\n ),\n (\n \"\"\"\n@external\ndef foo(_addr: address):\n raw_call(_addr, method_id(\"foo()\"), is_delegate_call=True, is_static_call=True)\n \"\"\",\n ArgumentException,\n ),\n (\n \"\"\"\n@external\n@view\ndef foo(_addr: address):\n raw_call(_addr, 256)\n \"\"\",\n InvalidType,\n ),\n]\n\n\n@pytest.mark.parametrize(\"source_code,exc\", uncompilable_code)\ndef test_invalid_type_exception(\n assert_compile_failed, get_contract_with_gas_estimation, source_code, exc\n):\n assert_compile_failed(lambda: get_contract_with_gas_estimation(source_code), exc)\n", "sub_path": "tests/parser/functions/test_raw_call.py", "file_name": "test_raw_call.py", "file_ext": "py", "file_size_in_byte": 10546, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pytest.mark.usefixtures", "line_number": 7, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 7, "usage_type": "attribute"}, {"api_name": "vyper.builtins.functions.eip1167_bytecode", "line_number": 78, "usage_type": "call"}, {"api_name": "hexbytes.HexBytes", "line_number": 85, "usage_type": "call"}, {"api_name": "hexbytes.HexBytes", "line_number": 86, "usage_type": "call"}, {"api_name": "vyper.exceptions.StateAccessViolation", "line_number": 374, "usage_type": "name"}, {"api_name": "vyper.exceptions.ArgumentException", "line_number": 382, "usage_type": "name"}, {"api_name": "vyper.exceptions.InvalidType", "line_number": 391, "usage_type": "name"}, {"api_name": 
"pytest.mark.parametrize", "line_number": 396, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 396, "usage_type": "attribute"}]} +{"seq_id": "185359453", "text": "#!/usr/bin/env python\nfrom postmarkup import textilize\n\nfrom django.db import models\nfrom django.db.models import CharField, TextField, IntegerField\n\nfrom base64 import encodestring, decodestring\nimport binascii\n\nMARKUP_TYPES = [ (\"html\", \"Raw HTML\"),\n (\"postmarkup\", \"Postmarkup (BBCode like)\"),\n (\"emarkup\", \"Extended markup\"),\n (\"text\", \"Plain text\"),\n (\"comment_bbcode\", \"BBcode used in comments\"), ]\n\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nclass PickledObject(str):\n \"\"\"A subclass of string so it can be told whether a string is\n a pickled object or not (if the object is an instance of this class\n then it must [well, should] be a pickled one).\"\"\"\n pass\n\nclass PickledObjectField(models.Field):\n __metaclass__ = models.SubfieldBase\n\n def to_python(self, value):\n if isinstance(value, PickledObject):\n # If the value is a definite pickle; and an error is raised in de-pickling\n # it should be allowed to propogate.\n return pickle.loads(str(decodestring(value)))\n else:\n try:\n try:\n return pickle.loads(str(decodestring(value)))\n except binascii.Error:\n return pickle.loads(str(value))\n except:\n return value\n\n\n def get_db_prep_save(self, value):\n if value is not None and not isinstance(value, PickledObject):\n value = PickledObject(encodestring(pickle.dumps(value)))\n return value\n\n def get_internal_type(self):\n return 'TextField'\n\n def get_db_prep_lookup(self, lookup_type, value):\n if lookup_type == 'exact':\n value = self.get_db_prep_save(value)\n return super(PickledObjectField, self).get_db_prep_lookup(lookup_type, value)\n elif lookup_type == 'in':\n value = [self.get_db_prep_save(v) for v in value]\n return super(PickledObjectField, self).get_db_prep_lookup(lookup_type, value)\n else:\n raise TypeError('Lookup type %s is not supported.' 
% lookup_type)\n\nclass MarkupField(TextField):\n\n\n def __init__(self, renderer=None, *args, **kwargs):\n self._renderer_callback = renderer or self._defaultrenderer\n\n super(MarkupField, self).__init__(*args, **kwargs)\n\n @classmethod\n def _defaultrenderer(cls, markup, markup_type):\n\n return markup, '', textilize(markup), {}\n\n\n def contribute_to_class(self, cls, name):\n\n self._html_field = name + \"_html\"\n self._type_field = name + \"_markup_type\"\n self._version_field = name + \"_version\"\n self._summary_field = name + \"_summary_html\"\n self._text_field = name + \"_text\"\n self._data_field = name + \"_data\"\n\n CharField(\"Markup type\", blank=False, max_length=20, choices=MARKUP_TYPES, default=\"postmarkup\").contribute_to_class(cls, self._type_field)\n IntegerField(default=0).contribute_to_class(cls, self._version_field)\n TextField(editable=True, blank=True, default=\"\").contribute_to_class(cls, self._html_field)\n TextField(editable=True, blank=True, default=\"\").contribute_to_class(cls, self._summary_field)\n TextField(editable=False, blank=True, default=\"\").contribute_to_class(cls, self._text_field)\n PickledObjectField(editable=False, default={}, blank=True).contribute_to_class(cls, self._data_field)\n\n super(MarkupField, self).contribute_to_class(cls, name)\n\n\n def pre_save(self, model_instance, add):\n\n markup = getattr(model_instance, self.attname)\n markup_type = getattr(model_instance, self._type_field)\n\n html, summary_html, text, data = self._renderer_callback(markup, markup_type)\n\n setattr(model_instance, self._html_field, html)\n setattr(model_instance, self._summary_field, summary_html)\n setattr(model_instance, self._text_field, text)\n setattr(model_instance, self._data_field, data)\n return markup", "sub_path": "techblog/markup/fields.py", "file_name": "fields.py", "file_ext": "py", "file_size_in_byte": 4033, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.db.models.Field", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.SubfieldBase", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "pickle.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "base64.decodestring", "line_number": 35, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "base64.decodestring", "line_number": 39, "usage_type": "call"}, {"api_name": "binascii.Error", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pickle.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "base64.encodestring", "line_number": 48, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 64, "usage_type": "name"}, {"api_name": "postmarkup.textilize", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 87, "usage_type": "call"}, {"api_name": "django.db.models.IntegerField", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "647443294", "text": 
"# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy.item import Item, Field\nimport base64\nimport MySQLdb\n\nclass GameappSpiderItem(Item):\n # define the fields for your item here like:\n # name = Field()\n pass\n\nclass ArticleItem(Item):\n t_when = Field()\n url = Field()\t\n title = Field()\n content = Field()\n image_urls = Field()\n game_id = Field()\n\n def _gen_insertone_sql(self):\n title = self['title']\n content = self['content']\n url = self['url']\n game_id = self['game_id']\n sql = \"\"\"\n insert into games_article \n (game_id, title, content, url ) \n values (\"%s\", \"%s\", \"%s\", \"%s\");\n \"\"\" % \\\n (game_id, MySQLdb.escape_string(title) , \\\n MySQLdb.escape_string(content), \\\n MySQLdb.escape_string(url))\n return sql\n\n", "sub_path": "gameapp_spider/gameapp_spider/items.py", "file_name": "items.py", "file_ext": "py", "file_size_in_byte": 969, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "scrapy.item.Item", "line_number": 10, "usage_type": "name"}, {"api_name": "scrapy.item.Item", "line_number": 15, "usage_type": "name"}, {"api_name": "scrapy.item.Field", "line_number": 16, "usage_type": "call"}, {"api_name": "scrapy.item.Field", "line_number": 17, "usage_type": "call"}, {"api_name": "scrapy.item.Field", "line_number": 18, "usage_type": "call"}, {"api_name": "scrapy.item.Field", "line_number": 19, "usage_type": "call"}, {"api_name": "scrapy.item.Field", "line_number": 20, "usage_type": "call"}, {"api_name": "scrapy.item.Field", "line_number": 21, "usage_type": "call"}, {"api_name": "MySQLdb.escape_string", "line_number": 33, "usage_type": "call"}, {"api_name": "MySQLdb.escape_string", "line_number": 34, "usage_type": "call"}, {"api_name": "MySQLdb.escape_string", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "574490836", "text": "'''this is a module that loads the meta data of the neural net and computes the performance'''\n# FIXME: only manually (roughly) checked for rnn scat case for both experiment data and simulation data.\n# Have not checked rnn raw data case.\n\n'''standard imports'''\nimport os\nimport numpy as np\nimport re\nimport torch\nimport matplotlib.pyplot as plt\nfrom itertools import product\nfrom matplotlib.pyplot import cm\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import SequentialSampler\n\n'''custom libraries'''\nimport common_utils as cu\nimport scat_utils as scu\nimport net_utils as nu\n\ndevice = 'cuda:0' # or cuda:0\n\n# for using part of the test data to make error bars, set the start and end index for the data\nidx_start = 900\nidx_end = idx_start + 100\n\n#min_loss_epochs = [1200, 1000, 9500, 7500, 350, 400, 2300, 2300] # 512 k\n#min_loss_epochs = [2000, 6000, 3500, 8500, 350, 1000, 3500, 3500] # 512 diff coef\n\n#min_loss_epochs = [2500, 1500, 3000, 3500, 500, 500, 700, 700] # 1024 k\n#min_loss_epochs = [2700, 1200, 3000, 9500, 400, 550, 550, 1300] # 1024 diff coef\n\n#min_loss_epochs = [1000, 5000, 7000, 9000, 200, 250, 1000, 1500] # 2048 k\n#min_loss_epochs = [2000, 5000, 9000, 5000, 300, 300, 1000, 3000] # 2048 diff coef\n\n\n#min_loss_epochs = [4000, 3500] # 512 k tbd_4\n#min_loss_epochs = [6500, 3000] # 512 diff coef tbd_4\n\n#min_loss_epochs = [4000, 3000] # 1024 k tbd_4\n#min_loss_epochs = [6000, 4000] # 1024 diff coef tbd_4\n\n#min_loss_epochs = [3000, 3000] # 2048 k tbd_4\nmin_loss_epochs = [5500, 4000] # 
2048 diff coef tbd_4\n\n\n#min_loss_epochs = [5500, 700] # 512_train_val_81_test_702\n#min_loss_epochs = [10000, 1700] # 512_train_val_405_test_378\n#min_loss_epochs = [4600, 2500] # 2048_train_val_81_test_108\n#min_loss_epochs = [9300, 1800] # 2048_train_val_135_test_54\n\nroot_dir = './data/simulations/data_len_2048_gamma_1_3_k_1_7_t_4_10/models'\n#root_dir = './data/experiments/bead/2020_0305/data_len_2048_train_val_135_test_54/models'\n#root_dir = './data/experiments/bead/2020_0228/'\n#root_dir = './data/experiments/bead/2020_0305/data_len_256_poly_train_val_ratio_0p2/models'\n#root_dir = './data/experiments/irfp'\n\n# file name of test data\n# TWO BEADS\nfile_names_test = ['tbd_0_test.pt',\n 'tbd_0_test_scat_0.pt']\n#file_name_test = 'tbd_0_test_scat_0.pt'\n#file_name_test = 'tbd_0_test_scat_1.pt'\n\n\"\"\"\nfile_names_test = [\n 'tbd_0_test.pt',\n 'tbd_0_test.pt',\n 'tbd_0_test.pt',\n 'tbd_0_test.pt',\n 'tbd_0_test_scat_0.pt',\n 'tbd_0_test_scat_0.pt',\n 'tbd_0_test_scat_0.pt',\n 'tbd_0_test_scat_0.pt',\n ]\n\"\"\"\n\n#file_names_test = ['data_test.pt', 'data_test_scat_0.pt']\n\n# IRFP\n\n\n\n# LIST of file names of trained models\n# TWO BEADS\n\"\"\"\nfile_names_meta = [\n 'tbd_0_meta_rnn_4_k_ratios.pt',\n 'tbd_1_meta_rnn_5_k_ratios.pt',\n 'tbd_2_meta_rnn_0_k_ratios.pt',\n 'tbd_3_meta_rnn_1_k_ratios.pt',\n 'tbd_0_scat_0_meta_rnn_3_k_ratios.pt',\n 'tbd_1_scat_0_meta_rnn_1_k_ratios.pt',\n 'tbd_2_scat_0_meta_rnn_1_k_ratios.pt',\n 'tbd_3_scat_0_meta_rnn_0_k_ratios.pt',\n ]\nfile_names_meta = [\n 'tbd_0_meta_rnn_2_diff_coef_ratios.pt',\n 'tbd_1_meta_rnn_4_diff_coef_ratios.pt',\n 'tbd_2_meta_rnn_1_diff_coef_ratios.pt',\n 'tbd_3_meta_rnn_0_diff_coef_ratios.pt',\n 'tbd_0_scat_0_meta_rnn_2_diff_coef_ratios.pt',\n 'tbd_1_scat_0_meta_rnn_0_diff_coef_ratios.pt',\n 'tbd_2_scat_0_meta_rnn_0_diff_coef_ratios.pt',\n 'tbd_3_scat_0_meta_rnn_1_diff_coef_ratios.pt',\n ]\n\"\"\"\n\n\n\"\"\"\nfile_names_meta = [\n cu.match_filename('(tbd_0_meta_rnn_[0-9]+_k_ratios.pt)', root_dir),\n cu.match_filename('(tbd_1_meta_rnn_[0-9]+_k_ratios.pt)', root_dir),\n cu.match_filename('(tbd_2_meta_rnn_[0-9]+_k_ratios.pt)', root_dir),\n cu.match_filename('(tbd_3_meta_rnn_[0-9]+_k_ratios.pt)', root_dir),\n cu.match_filename('(tbd_0_scat_0_meta_rnn_[0-9]+_k_ratios.pt)', root_dir),\n cu.match_filename('(tbd_1_scat_0_meta_rnn_[0-9]+_k_ratios.pt)', root_dir),\n cu.match_filename('(tbd_2_scat_0_meta_rnn_[0-9]+_k_ratios.pt)', root_dir),\n cu.match_filename('(tbd_3_scat_0_meta_rnn_[0-9]+_k_ratios.pt)', root_dir),\n ]\nfor file_name_meta in file_names_meta:\n assert(len(file_name_meta) == 1), \"Invalid number of files. Should be only 1 trained model for each case\"\nfile_names_meta = [file_name_meta[0] for file_name_meta in file_names_meta]\n\nfile_names_meta = [\n cu.match_filename('(tbd_0_meta_rnn_[0-9]+_diff_coef_ratios.pt)', root_dir),\n cu.match_filename('(tbd_1_meta_rnn_[0-9]+_diff_coef_ratios.pt)', root_dir),\n cu.match_filename('(tbd_2_meta_rnn_[0-9]+_diff_coef_ratios.pt)', root_dir),\n cu.match_filename('(tbd_3_meta_rnn_[0-9]+_diff_coef_ratios.pt)', root_dir),\n cu.match_filename('(tbd_0_scat_0_meta_rnn_[0-9]+_diff_coef_ratios.pt)', root_dir),\n cu.match_filename('(tbd_1_scat_0_meta_rnn_[0-9]+_diff_coef_ratios.pt)', root_dir),\n cu.match_filename('(tbd_2_scat_0_meta_rnn_[0-9]+_diff_coef_ratios.pt)', root_dir),\n cu.match_filename('(tbd_3_scat_0_meta_rnn_[0-9]+_diff_coef_ratios.pt)', root_dir),\n ]\nfor file_name_meta in file_names_meta:\n assert(len(file_name_meta) == 1), \"Invalid number of files. 
Should be only 1 trained model for each case\"\nfile_names_meta = [file_name_meta[0] for file_name_meta in file_names_meta]\n\nfile_names_meta = [\n cu.match_filename('(tbd_4_meta_rnn_[0-9]+_k_ratios.pt)', root_dir),\n cu.match_filename('(tbd_4_scat_0_meta_rnn_[0-9]+_k_ratios.pt)', root_dir),\n ]\nfor file_name_meta in file_names_meta:\n assert(len(file_name_meta) == 1), \"Invalid number of files. Should be only 1 trained model for each case\"\nfile_names_meta = [file_name_meta[0] for file_name_meta in file_names_meta]\n\n\"\"\"\nfile_names_meta = [\n cu.match_filename('(tbd_4_meta_rnn_[0-9]+_diff_coef_ratios.pt)', root_dir),\n cu.match_filename('(tbd_4_scat_0_meta_rnn_[0-9]+_diff_coef_ratios.pt)', root_dir),\n ]\nfor file_name_meta in file_names_meta:\n assert(len(file_name_meta) == 1), \"Invalid number of files. Should be only 1 trained model for each case\"\nfile_names_meta = [file_name_meta[0] for file_name_meta in file_names_meta]\n\n\n\n\n#file_names_meta = ['data_meta_rnn_1.pt', 'data_scat_0_meta_rnn_1.pt']\n\n# IRFP\n#file_names_meta = ['data_meta_rnn_11.pt', 'data_scat_0_meta_rnn_11.pt']\n# OR, provide file names and paths using regular expression\n#file_paths_meta = glob.glob(os.path.join(root_dir, 'tbd_0_scat_meta_rnn_*.pt'))\n#file_names_meta = [os.path.basename(file_path) for file_path in file_paths]\n#file_names = ['tbd_1_scat.pt'] * len(file_paths_meta)\n\nbatch_size = 40 # batch size when performing forward propagation on test data using trained weights\n\nfile_paths_test = [os.path.join(root_dir, file_name_test) for file_name_test in file_names_test]\nfile_paths_meta = [os.path.join(root_dir, file_name_meta) for file_name_meta in file_names_meta]\nn_files = len(file_paths_meta)\n\nfor idx_file in range(n_files):\n\n file_path_meta = file_paths_meta[idx_file]\n file_name_meta = file_names_meta[idx_file]\n file_path_test = file_paths_test[idx_file]\n\n samples = torch.load(file_path_test)\n data, labels, label_names = samples['data'], samples['labels'], samples['label_names']\n n_data_total = len(data) \n if device == 'cpu':\n meta = torch.load(file_path_meta, map_location='cpu')\n else:\n meta = torch.load(file_path_meta, map_location='cuda:0')\n # compute the nearest epoch number\n min_loss_epoch = min_loss_epochs[idx_file]\n idx_min_loss_epoch = np.argmin(np.abs((np.array(meta['epoch']) - min_loss_epoch)))\n classifier = meta['classifier']\n elapsed = meta['elapsed'][idx_min_loss_epoch]\n epoch = meta['epoch'][idx_min_loss_epoch]\n if not meta['classifier']:\n idx_label = samples['label_names'].index(meta['label_names'])\n labels = labels[idx_label]\n label_names = label_names[idx_label]\n # reshape data. 
output is shaped (n_data_total, n_channels * (n_scat_nodes), data_len).\n # (n_scat_nodes) means 1 if data not transformed\n if isinstance(data, np.ndarray):\n data = np.reshape(data, (n_data_total, -1, data.shape[-1]))\n elif isinstance(data, list):\n data = [np.reshape(data_slice, (-1, data_slice.shape[-1])) for data_slice in data]\n else:\n raise ValueError(\"Invalid type of data given\")\n\n # take out only a fraction of the test data\n data = data[idx_start:idx_end]\n labels = labels[idx_start:idx_end]\n n_data_total = len(data)\n\n input_size = data[0].shape[0]\n output_size = meta['output_size']\n dataset = nu.TimeSeriesDataset(data, labels, transform=nu.ToTensor())\n dataloader = DataLoader(dataset, sampler=SequentialSampler(range(n_data_total)),\n batch_size=batch_size, collate_fn=nu.collate_fn, num_workers=0)\n\n if device == 'cpu':\n rnn = nu.RNN(input_size=meta['input_size'], hidden_size=meta['hidden_size'],\n output_size=meta['output_size'], n_layers=meta['n_layers'], bidirectional=meta['bidirectional'])\n else:\n rnn = nu.RNN(input_size=meta['input_size'], hidden_size=meta['hidden_size'],\n output_size=meta['output_size'], n_layers=meta['n_layers'], bidirectional=meta['bidirectional']).cuda()\n rnn.load_state_dict(meta['model'][idx_min_loss_epoch])\n del meta\n #criterion = nn.CrossEntropyLoss(reduction='sum') if classifier else nn.MSELoss(reduction='sum')\n #metric = 'cross_entropy_mean' if classifier else 'rmse'\n loss_sum = {}\n loss_metric = {}\n loss_sum = 0.\n outputs = []\n for batch in dataloader:\n # permute s.t. shape is (data_len, n_data_total, n_channels * (n_scat_nodes))\n if device == 'cpu':\n batch_data = batch['data'].permute([2, 0, 1])\n input_lens = batch['input_lens'].type(torch.LongTensor)\n else:\n batch_data = batch['data'].permute([2, 0, 1]).cuda()\n input_lens = batch['input_lens'].type(torch.cuda.LongTensor)\n output = rnn(batch_data, input_lens=input_lens)\n # for regression, output of rnn is shaped (batch_size, 1). 
drop dummy axis\n if classifier:\n output = output.argmax(axis=1).detach().cpu().numpy()\n else:\n output = output[:, 0].detach().cpu().numpy()\n outputs.append(output)\n\n outputs = np.concatenate(outputs, axis=0)\n if classifier:\n accuracy = sum(outputs == np.array(labels)) / n_data_total\n print(\"file_name:{}, epoch:{}, accuracy(%):{:.2f}, elapsed:{:.1f}\"\n .format(file_name_meta, epoch, accuracy * 100, elapsed))\n else:\n rmse = np.sqrt(sum((outputs - labels)**2) / n_data_total)\n print(\"file_name:{}, epoch:{}, rmse:{:.2f}, elapsed:{:.1f}\"\n .format(file_name_meta, epoch, rmse, elapsed))\n\n\n", "sub_path": "compute_predictions_fraction.py", "file_name": "compute_predictions_fraction.py", "file_ext": "py", "file_size_in_byte": 10664, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "common_utils.match_filename", "line_number": 149, "usage_type": "call"}, {"api_name": "common_utils.match_filename", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 202, "usage_type": "call"}, {"api_name": "net_utils.TimeSeriesDataset", "line_number": 213, "usage_type": "call"}, {"api_name": "net_utils.ToTensor", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.utils.data.sampler.SequentialSampler", "line_number": 214, "usage_type": "call"}, {"api_name": "net_utils.collate_fn", "line_number": 215, "usage_type": "attribute"}, {"api_name": "net_utils.RNN", "line_number": 218, "usage_type": "call"}, {"api_name": "net_utils.RNN", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 235, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 238, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 253, "usage_type": "call"}]} +{"seq_id": "600508951", "text": "import os\nimport core.log\nimport logging\nimport json as JSON\nimport boto3\nfrom core.param_store import ParamStore\nimport core.post_schema as ps\n\n\ndef handler(event, context):\n \"\"\"\n Handles the dispatch event.\n \"\"\"\n logging.debug('Event received: {}'.format(JSON.dumps(event)))\n\n if not ps.validate(event):\n raise Exception('Invalid JSON object schema.')\n\n if not _publish_sqs_message(event):\n raise Exception('Failed to publish SQS message.')\n\n response = {\n \"statusCode\": 200\n }\n\n logging.debug('Event received: {}'.format(JSON.dumps(event)))\n return 
response\n\n\ndef _publish_sqs_message(message):\n    \"\"\"\n    Publishes a message to SQS so it can trigger a worker.\n    \"\"\"\n    sqs_response = None\n    try:\n        message_json = JSON.dumps(message)\n\n        logging.debug('Publish SQS Message: {}'.format(message_json))\n\n        client = boto3.client('sqs')\n        sqs_response = client.send_message(\n            QueueUrl=ParamStore.SQS_DISPATCH_QUEUE_URL(), MessageBody=message_json)\n\n        logging.debug('Publish SQS Response: {}'.format(\n            JSON.dumps(sqs_response)))\n    except Exception as e:\n        logging.exception(e)\n\n    return sqs_response and sqs_response.get('ResponseMetadata').get('HTTPStatusCode') == 200\n", "sub_path": "functions/dispatchers/dispatcher.py", "file_name": "dispatcher.py", "file_ext": "py", "file_size_in_byte": 1285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.debug", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "core.post_schema.validate", "line_number": 16, "usage_type": "call"}, {"api_name": "core.post_schema", "line_number": 16, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 26, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 26, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 38, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 40, "usage_type": "call"}, {"api_name": "core.param_store.ParamStore.SQS_DISPATCH_QUEUE_URL", "line_number": 42, "usage_type": "call"}, {"api_name": "core.param_store.ParamStore", "line_number": 42, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 44, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "549702587", "text": "import unittest\nfrom jigsolver.puzzle import *\nimport matplotlib.pyplot as plt\n\nclass PlacePieceTestCase(unittest.TestCase):\n    def setUp(self):\n        img = plt.imread('img/peppers.png')[:,:,:3]\n        self.puzzle = Puzzle(img,patch(28))\n        # puzzle of size 4 × 4\n\n    def test_place_piece_should_update_is_placed_attribute(self):\n        piece = self.puzzle.bag_of_pieces[3]\n        self.puzzle.place(piece, (2, 2))\n        self.assertTrue(piece._is_placed)\n    \n    def test_place_piece_twice_should_raise_error(self):\n        piece = self.puzzle.bag_of_pieces[3]\n        self.puzzle.place(piece, (2, 2))\n        with self.assertRaises(AssertionError):\n            self.puzzle.place(piece, (3, 3))\n\n    def test_place_piece_in_occupied_slot_should_raise_error(self):\n        piece = self.puzzle.bag_of_pieces[3]\n        self.puzzle.place(piece, (2, 2))\n        with self.assertRaises(AssertionError):\n            piece2 = self.puzzle.bag_of_pieces[4]\n            self.puzzle.place(piece2, (2, 2))\n\n    # test for the OLDER version\n    # def test_place_pieces_should_update_available_slots(self):\n    #     piece = self.puzzle.bag_of_pieces[1]\n    #     self.puzzle.place(piece, (1, 1))\n    #     piece = self.puzzle.bag_of_pieces[2]\n    #     self.puzzle.place(piece, (1, 2))\n    #\n    #     computed_available_coords = set(available_positions(self.puzzle))\n    #     true_available_coords = set([(0,1), (0,2), (1,0), (1,3), (2,1), (2,2)])\n    #\n    #     self.assertEqual(len(computed_available_coords), 6)\n    #     self.assertSetEqual(computed_available_coords, true_available_coords)\n\n\nif __name__ == '__main__':\n    unittest.main()\n", "sub_path": "tests/placer_test.py", "file_name": "placer_test.py", "file_ext": "py", 
"file_size_in_byte": 1658, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "unittest.TestCase", "line_number": 5, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "343031023", "text": "#!/bin/python\n#\n# First argument: scenario file\n\nimport sys\nimport os\nimport logging\nfrom subprocess import Popen,PIPE\n\nfrom pysmac.utils.smac_input_readers import read_scenario_file\nfrom pysmac.utils.smac_output_readers import read_instances_file\n\nBIN = \"/mhome/lindauer/bin/SAT-features-competition2012/featuresSAT12\"\nOPTS = \"-base\"\nACLIB_HOME = \"/data/aad/aclib/\"\nRUNSOLVER = os.path.join(ACLIB_HOME, \"target_algorithms\", \"runsolver\", \"runsolver\")\nTIME = 60\nMEM = 2000\n\nif not os.path.islink(\"instances\"):\n print(os.path.join(ACLIB_HOME, \"instances\"))\n os.symlink(os.path.join(ACLIB_HOME, \"instances\"), \"instances\")\n\n\nscen = read_scenario_file(sys.argv[1])\n\ntrain_fn = scen[\"instance_file\"]\ntest_fn = scen[\"test_instance_file\"]\n\ntrain_insts = map(lambda x : x[0], read_instances_file(train_fn)) # remove instance specifics\ntest_insts = map(lambda x : x[0], read_instances_file(test_fn))\n\ninsts = train_insts\ninsts.extend(test_insts)\n\n\nfirst = True\nfor i in insts:\n cmd = [RUNSOLVER, \"-W\", str(TIME), \"-M\", str(MEM), \"-w\", \"/dev/null\", BIN, OPTS, i]\n cmd = \" \".join(cmd)\n logging.debug(\"CMD: %s\" %(cmd))\n p = Popen(cmd, shell=True, stdout=PIPE)\n stdout, stderr = p.communicate()\n feats = stdout.split(\"\\n\")[-2]\n if first:\n first = False\n header = stdout.split(\"\\n\")[-3]\n print(\"instance,%s\" %(header))\n \n print(\"%s,%s\" %(i,feats))\n \n \n \n \n\n\n\n\n\n\n\n", "sub_path": "aclib/src/helpers/generate_sat_features.py", "file_name": "generate_sat_features.py", "file_ext": "py", "file_size_in_byte": 1414, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.islink", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.symlink", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pysmac.utils.smac_input_readers.read_scenario_file", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pysmac.utils.smac_output_readers.read_instances_file", "line_number": 30, "usage_type": "call"}, {"api_name": "pysmac.utils.smac_output_readers.read_instances_file", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 41, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 42, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "564329807", "text": "\"\"\"\nWSGI config for peanut project.\n\nIt exposes the WSGI 
callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/\n\"\"\"\n\nimport os, sys, site\n\nsite.addsitedir('/home/ubuntu/env/local/lib/python2.7/site-packages')\n\nsys.path.append('/home/ubuntu/dev/Duffy/peanut')\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"peanut.settings.dev\")\n\n# Activate your virtual env\nactivate_env=os.path.expanduser(\"/home/ubuntu/env/bin/activate_this.py\")\nexecfile(activate_env, dict(__file__=activate_env))\n\nfrom django.core.wsgi import get_wsgi_application\napplication = get_wsgi_application()\n", "sub_path": "peanut/peanut/wsgi_dev.py", "file_name": "wsgi_dev.py", "file_ext": "py", "file_size_in_byte": 678, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "site.addsitedir", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ.setdefault", "line_number": 16, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.core.wsgi.get_wsgi_application", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "486030590", "text": "import requests\nfrom bs4 import BeautifulSoup\n#if__name__ == \"__main__\":\n\nurl = input('enter the url: ')\n\ndef get(url):\n links =[]\n web = requests.get(url)\n web_Text = web.text\n soup = BeautifulSoup(web_Text, features=\"html.parser\")\n for link in soup.find_all('a'):\n links.append(link.get('href'))\n for link in links:\n print(link)\n\n #print(len(links))\n print(\"The number of links in \", url, \"is\",len(links))\n\n#get(\"http://fb.com\")\nget(url)\n", "sub_path": "projects/getting_web_links/getting_links.py", "file_name": "getting_links.py", "file_ext": "py", "file_size_in_byte": 478, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "300270186", "text": "\"\"\"\nFunctions to prioritise labelling data points (to drive active learning).\n\nThis module implements a range of functions that produce ordering of data based\non class probabilities.\n\"\"\"\n\nfrom typing import List\n\nimport numpy as np\nimport scipy.stats\n\nfrom ..prioritisation import _shuffle_subset\n\n\ndef entropy(\n probabilities: List[np.ndarray], shuffle_prop: float = 0.1\n) -> np.ndarray:\n \"\"\"\n Sort by the entropy of the probabilities (high to low).\n\n Parameters\n ----------\n probabilities : np.ndarray\n An array of probabilities, with the shape n_samples,\n n_classes\n shuffle_prop : float\n The proportion of data points that should be randomly shuffled. 
This\n means the sorting retains some randomness, to avoid biasing your\n new labels and catching any minority classes the algorithm currently\n classifies as a different label.\n\n \"\"\"\n entropies = sum(\n [\n -scipy.stats.entropy(probability_array.T)\n for probability_array in probabilities\n ]\n ) / len(probabilities)\n ordered = np.argsort(entropies)\n return _shuffle_subset(ordered.argsort(), shuffle_prop)\n\n\ndef margin(probabilities: List[np.ndarray], shuffle_prop=0.1):\n \"\"\"\n Sort by the margin between the top two predictions (low to high).\n\n Parameters\n ----------\n probabilities : np.ndarray\n An array of probabilities, with the shape n_samples,\n n_classes\n shuffle_prop : float\n The proportion of data points that should be randomly shuffled. This\n means the sorting retains some randomness, to avoid biasing your\n new labels and catching any minority classes the algorithm currently\n classifies as a different label.\n\n \"\"\"\n margins = sum(\n [\n np.sort(probability_array, axis=1)[:, -1]\n - np.sort(probability_array, axis=1)[:, -2]\n for probability_array in probabilities\n ]\n ) / len(probabilities)\n ordered = np.argsort(margins)\n return _shuffle_subset(ordered.argsort(), shuffle_prop)\n\n\ndef certainty(probabilities, shuffle_prop=0.1):\n \"\"\"\n Sort by the certainty of the maximum prediction.\n\n Parameters\n ----------\n probabilities : np.ndarray\n An array of probabilities, with the shape n_samples,\n n_classes\n shuffle_prop : float\n The proportion of data points that should be randomly shuffled. This\n means the sorting retains some randomness, to avoid biasing your\n new labels and catching any minority classes the algorithm currently\n classifies as a different label.\n\n \"\"\"\n certainties = sum(\n [\n np.max(probability_array, axis=1)\n for probability_array in probabilities\n ]\n ) / len(probabilities)\n ordered = np.argsort(certainties)\n return _shuffle_subset(ordered.argsort(), shuffle_prop)\n\n\nfunctions = {\"entropy\": entropy, \"margin\": margin, \"certainty\": certainty}\n\"\"\"A dictionary of functions to prioritise data.\"\"\"\n", "sub_path": "superintendent/multioutput/prioritisation.py", "file_name": "prioritisation.py", "file_ext": "py", "file_size_in_byte": 3027, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 17, "usage_type": "attribute"}, {"api_name": "scipy.stats.stats.entropy", "line_number": 36, "usage_type": "call"}, {"api_name": "scipy.stats.stats", "line_number": 36, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.argsort", "line_number": 40, "usage_type": "call"}, {"api_name": "prioritisation._shuffle_subset", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 18, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.sort", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 67, "usage_type": "call"}, {"api_name": "prioritisation._shuffle_subset", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 
93, "usage_type": "call"}, {"api_name": "prioritisation._shuffle_subset", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "485972917", "text": "# -*- coding:utf-8 -*-\nfrom selenium import webdriver\n\nfrom time import sleep\nimport json\nimport unittest\nimport os\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nfrom base import Base\nfrom config import Config\n\nclass Test_boss(unittest.TestCase):\n\n    def setUp(self) -> None:\n        # load the user's local browser profile cache\n        option = webdriver.ChromeOptions()\n        option.add_argument(r\"user-data-dir=C:\\Users\\EDZ\\AppData\\Local\\Google\\Chrome\\User Data\")  # browser profile path\n        # initialize the driver\n        self.driver = webdriver.Chrome(options=option)\n\n        # self.driver = webdriver.Chrome()\n        self.driver.get(\"https://www.zhipin.com/web/geek/recommend\")\n        sleep(3)\n        self.driver.maximize_window()\n        self.driver.implicitly_wait(10)\n\n        sleep(3)\n\n        self.b = Base(driver=self.driver)\n        self.c = Config()\n\n\n\n    def test_search(self):\n        self.b.search_job()\n        data = self.b.turnPage(\"SQL\")\n        print(\"data -> %s\" % data)\n        # save to Excel\n        fileName = os.path.dirname(os.path.abspath(__file__)) + \"/boss.xlsx\"\n        self.c.sava_to_excel(self, data=data, fileName=fileName)\n\n    def tearDown(self) -> None:\n        sleep(3)\n        self.driver.close()\n\nif __name__ == '__main__':\n    unittest.main()\n\n\n", "sub_path": "test_boss.py", "file_name": "test_boss.py", "file_ext": "py", "file_size_in_byte": 1291, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "unittest.TestCase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 17, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 17, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 20, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "base.Base", "line_number": 30, "usage_type": "call"}, {"api_name": "config.Config", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 40, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "488034259", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n'''\n@Author: YouShumin\n@Date: 2020-06-08 11:16:24\n@LastEditTime: 2020-06-10 10:55:17\n@LastEditors: YouShumin\n@Description: \n@FilePath: /frame_tornado/run_server.py\n'''\nimport os\nimport sys\n\nimport tornado.options\nfrom tornado.options import define, options\nfrom configs.setting import PROJECT_NAME\nimport logging\n\nLOG = logging.getLogger(__name__)\ntry:\n    import sentry_sdk\n    from sentry_sdk.integrations.tornado import TornadoIntegration\n\n    sentry_sdk.init(\n        dsn=\"https://b1696404710445e79550eb272ab9b5c1@sentry.io/1818061\",\n        integrations=[TornadoIntegration()])\nexcept Exception as e:\n    print(e)\n\n\nclass AppMain:\n    def __init__(self):\n        PATH_APP_ROOT = os.path.abspath(\n            os.path.join(os.path.abspath(os.path.dirname(__file__))))\n        if PATH_APP_ROOT not in sys.path:\n            sys.path.insert(0, PATH_APP_ROOT)\n        define(\"APP_PATH\", default=PATH_APP_ROOT, help=\"app run 
dir\")\n        from app import web_app\n\n        self._web_app = web_app()\n\n    def start(self):\n        return self._web_app.run()\n\n    def stop(self):\n        return self._web_app.stop()\n\n\nif __name__ == \"__main__\":\n    main = AppMain()\n    try:\n        main.start()\n    except KeyboardInterrupt:\n        main.stop()\n", "sub_path": "run_server.py", "file_name": "run_server.py", "file_ext": "py", "file_size_in_byte": 1285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "sentry_sdk.init", "line_number": 24, "usage_type": "call"}, {"api_name": "sentry_sdk.integrations.tornado.TornadoIntegration", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tornado.options.define", "line_number": 37, "usage_type": "call"}, {"api_name": "app.web_app", "line_number": 40, "usage_type": "call"}, {"api_name": "{'web_app': 'app.web_app'}", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "189536667", "text": "import time\nimport numpy as np\nimport cv2 as cv\nimport sys\nimport math\nimport pickle\nsys.path.insert(0, r'./TadroBeaconTracker/tadro-tracker/2Led/')\n# load Config\nfrom config import D as CFG\nfrom utils import *\n\nclass Track2Led:\n    # def __init__(self, SETTINGS, DATA):\n    #     self.SETTINGS = SETTINGS\n    #     self.DATA = DATA\n    def __init__(self, DATA):\n        DATA.created_images = False\n        self.time = time.clock()\n\n    def init_images(self, DATA, SETTINGS):\n        shape = DATA.processed_image.shape\n\n        # Create images for each color channel\n        DATA.red_image = np.zeros(shape)\n        DATA.blue_image = np.zeros(shape)\n        DATA.green_image = np.zeros(shape)\n        #DATA.hue_image = np.zeros(shape)\n        #DATA.sat_image = np.zeros(shape)\n        #DATA.val_image = np.zeros(shape)\n\n        DATA.red_image_threshed = np.eye(*shape)\n        DATA.green_image_threshed = np.eye(*shape)\n        DATA.blue_threshed_image = np.eye(*shape)\n        #DATA.hue_threshed_image = np.eye(*shape)\n        #DATA.sat_threshed_image = np.eye(*shape)\n        #DATA.val_threshed_image = np.eye(*shape)\n        # The final thresholded result\n        DATA.threshed_images = [None, None] # array holding the resulting thresholded images, [object, object]\n        DATA.current_threshold = DATA.threshed_images\n        DATA.threshed_images[CFG.LEFT_LD] = np.eye(*shape) #operator unpacking\n        DATA.threshed_images[CFG.RIGHT_LD] = np.eye(*shape)\n        # Create an hsv image and a copy for contour-finding\n        DATA.hsv = np.eye(*shape)\n        DATA.copy = np.eye(*shape)\n        #DATA.storage = cv.CreateMemStorage(0) # Create memory storage for contours\n\n        # bunch of keypress values\n        # So we know what to show, depending on which key is pressed\n        DATA.key_dictionary = {\n            #ord('w'): DATA.threshed_images,\n            #ord('u'): DATA.red_image,\n            #ord('i'): DATA.green_image,\n            #ord('o'): DATA.blue_image,\n            #ord('j'): DATA.red_image_threshed,\n            #ord('k'): DATA.green_image_threshed,\n            #ord('l'): 
DATA.blue_threshed_image,\n            #ord('a'): DATA.hue_image,\n            #ord('s'): DATA.sat_image,\n            #ord('d'): DATA.val_image,\n            #ord('z'): DATA.hue_threshed_image,\n            #ord('x'): DATA.sat_threshed_image,\n            #ord('c'): DATA.val_threshed_image,\n        }\n        #TODO: drop this; do the calibration in a separate program from still images, with pickle save and load (the read/write methods are already available in this module)\n        #Obtain the image from the camera calibration to subtract from the captured image\n        if CFG.CAMERA_CALIBRATION_UNDISTORT:\n            data_file = open(CFG.CAMERA_CALIBRATION_PATH, 'rb')\n            calib_data = pickle.load(data_file)\n            SETTINGS.mtx = calib_data['mtx']\n            SETTINGS.dist = calib_data['dist']\n            data_file.close()\n\n        \"\"\" cap = cv.VideoCapture(DATA.CAMERA_CALIBRATION_PATH)\n        #cap = cv.VideoCapture('C:/Users/barte/Documents/Studia VII/Image_processing/Assets/Green_Blue_Led.avi')\n        if(not cap.isOpened()):\n            raise NameError(\"Invalid camera calibration file path. Turn off camera calibration subtraction or correct.\")\n        else:\n            print(\"Camera calibration path exists.\")\n        for i in range(0, DATA.NUM_CALIBRATION_FRAMES_TO_SKIP):\n            cap.read()\n        ret, frame = cap.read()\n\n        if (DATA.PLAY_IN_LOOP == True):\n            frame_counter += 1\n            #If the last frame is reached, reset the capture and the frame_counter\n            CV_CAP_PROP_FRAME_COUNT = 7\n            if frame_counter == cap.get(CV_CAP_PROP_FRAME_COUNT):\n                frame_counter = 0 #Or whatever as long as it is the same as next line\n                CV_CAP_PROP_POS_FRAMES = 1\n                cap.set(CV_CAP_PROP_POS_FRAMES, 0)\n\n        DATA.calibration_image = frame\n        \n        R = Robot2Led(30, (12,32), np.pi/2,24.52,423.342) \n        return R.print()\"\"\"\n        pass\n\n    def threshold_image(self, DATA, SETTINGS):\n        \"\"\" runs the image processing in order to create a \n        black and white thresholded image out of DATA.processed_image\n        into DATA.threshed_images.\n        \"\"\"\n\n        if CFG.CAMERA_CALIBRATION_UNDISTORT:\n            #DATA.processed_image = cv.subtract(DATA.processed_image, DATA.calibration_image)\n            DATA.processed_image = cv.undistort(DATA.processed_image, SETTINGS.mtx, SETTINGS.dist, None, SETTINGS.mtx)\n\n        # DATA.processed_image.shape[2] gives the number of channels\n        DATA.BGRchannels = cv.split(DATA.processed_image)\n        #print DATA.BGRchannels\n        DATA.blue_image = DATA.BGRchannels[0]\n        DATA.green_image = DATA.BGRchannels[1]\n        DATA.red_image = DATA.BGRchannels[2]\n\n        # This line creates a hue-saturation-value image\n        #DATA.hsv = cv.cvtColor(DATA.processed_image, cv.COLOR_BGR2HSV)\n        #print DATA.processed_image.shape\n        #print DATA.hsv\n        #print DATA.hsv.shape\n        #print cv.split(DATA.hsv)\n        #DATA.HSVchannels = cv.split(DATA.hsv)\n        #print DATA.HSVchannels\n        #DATA.hue_image = DATA.HSVchannels[0]\n        #DATA.sat_image = DATA.HSVchannels[1]\n        #DATA.val_image = DATA.HSVchannels[2]\n\n        shape = DATA.processed_image.shape\n        \n        for i in range(len(SETTINGS.thresholds)):\n            DATA.red_threshed_image = np.eye(*shape)\n            DATA.blue_threshed_image = np.eye(*shape)\n            DATA.green_threshed_image = np.eye(*shape)\n            #DATA.hue_threshed_image = np.eye(*shape)\n            #DATA.sat_threshed_image = np.eye(*shape)\n            #DATA.val_threshed_image = np.eye(*shape)\n\n            DATA.threshed_images[i] = np.eye(*shape)\n\n            try:\n                DATA.red_threshed_image = cv.inRange(\n                    DATA.red_image, SETTINGS.thresholds[i][\"low_red\"], SETTINGS.thresholds[i][\"high_red\"], DATA.red_threshed_image)\n                DATA.blue_threshed_image = cv.inRange(\n                    DATA.blue_image, SETTINGS.thresholds[i][\"low_blue\"], SETTINGS.thresholds[i][\"high_blue\"], DATA.blue_threshed_image)\n                DATA.green_threshed_image = cv.inRange(\n                    DATA.green_image, SETTINGS.thresholds[i][\"low_green\"], 
SETTINGS.thresholds[i][\"high_green\"], DATA.green_threshed_image)\n                #DATA.hue_threshed_image = cv.inRange(\n                #    DATA.hue_image, SETTINGS.thresholds[i][\"low_hue\"], SETTINGS.thresholds[i][\"high_hue\"], DATA.hue_threshed_image)\n                #DATA.sat_threshed_image = cv.inRange(\n                #    DATA.sat_image, SETTINGS.thresholds[i][\"low_sat\"], SETTINGS.thresholds[i][\"high_sat\"], DATA.sat_threshed_image)\n                #DATA.val_threshed_image = cv.inRange(\n                #    DATA.val_image, SETTINGS.thresholds[i][\"low_val\"], SETTINGS.thresholds[i][\"high_val\"], DATA.val_threshed_image)\n            except:\n                pass\n\n            #multiply the channel thresholds into the resulting threshed_images\n            DATA.threshed_images[i] = cv.multiply(\n                DATA.red_threshed_image, DATA.green_threshed_image, DATA.threshed_images[i])\n            DATA.threshed_images[i] = cv.multiply(\n                DATA.threshed_images[i], DATA.blue_threshed_image, DATA.threshed_images[i])\n            # DATA.threshed_images[i] = cv.multiply(\n            #     DATA.threshed_images[i], DATA.hue_threshed_image, DATA.threshed_images[i])\n            # DATA.threshed_images[i] = cv.multiply(\n            #     DATA.threshed_images[i], DATA.sat_threshed_image, DATA.threshed_images[i])\n            # DATA.threshed_images[i] = cv.multiply(\n            #     DATA.threshed_images[i], DATA.val_threshed_image, DATA.threshed_images[i])\n\n        #DATA.threshed_images = cv.dilate(DATA.threshed_images, None, iterations=2)\n\n        #cv.imshow(DATA.threshed_images)\n        # erosion/dilation, as needed\n        #cv.erode(DATA.threshed_images, DATA.threshed_images, iterations = 1)\n        #cv.dilate(DATA.threshed_images, DATA.threshed_images, iterations = 1)\n        #kernel = np.ones((5,5),np.uint8)\n        #for i in range(len(DATA.threshed_images)):\n        #    erosion = cv.erode(DATA.threshed_images[i],kernel,iterations = 1)\n        #    dilation = cv.dilate(DATA.threshed_images[i],kernel,iterations = 1)\n\n    def check_LED(self, x1, y1, x2, y2):\n        #check whether the LEDs are close to each other\n\n        #later, it would be nice to know exactly how far apart they should be based on the skew grid\n        #and build a stronger heuristic from that\n        MAX_DIST = 500\n        MIN_DIST = 1\n        dist = math.sqrt(abs(int(x2) - int(x1))**2 + abs(int(y2) - int(y1))**2)\n\n        result = MIN_DIST < dist < MAX_DIST\n\n        return result\n\n    def find_2Led(self, DATA, SETTINGS):\n        \"\"\" finds all the contours in threshed image, finds the largest of those,\n        and then marks it in the main image\n        \"\"\"\n        # initialize list of LED posns to len of thresholds\n        LED = [0 for k in range(len(SETTINGS.thresholds))]\n\n        for i in range(len(DATA.threshed_images)):\n            # Create a copy image of thresholds then find contours on that image\n            DATA.copy = DATA.threshed_images[i].copy() # copy threshed image\n\n            # find all of the contours\n            _, contours, _ = cv.findContours(DATA.copy, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n\n            # find the largest contour\n            # this is the standard algorithm: #replace with np.sort() or sorted()\n            \n            if len(contours) > 0:\n                \"\"\" biggest = contours[0]\n                second_biggest = contours[0]\n                biggestArea = cv.contourArea(contours[0]) #get first contour\n                secondArea = cv.contourArea(contours[0])\n                for x in contours:\n                    nextArea = cv.contourArea(x)\n                    if biggestArea < nextArea:\n                        second_biggest = biggest\n                        biggest = x\n                        secondArea = biggestArea\n                        biggestArea = nextArea\n                \"\"\"\n                #same thing: cnts = sorted(cnts, key=cv2.contourArea, reverse=True)\n                cnts = sorted(contours, key=cv.contourArea, reverse=True)\n                biggest = cnts[0]\n\n                # place a bounding rect on the contour\n                br = cv.boundingRect(biggest)\n\n                # Make a bounding box around the biggest blob\n                upper_left = (br[0], br[1])\n                lower_left = (br[0], br[1] + br[3])\n                
lower_right = (br[0] + br[2], br[1] + br[3])\n                upper_right = (br[0] + br[2], br[1])\n                cv.polylines(DATA.base_image, [np.array([upper_left,lower_left,lower_right,upper_right], dtype=np.int32)],\n                    1, (255, 0, 0))\n                cv.polylines(DATA.threshed_images[i], [np.array([upper_left,lower_left,lower_right,upper_right], dtype=np.int32)],\n                    1, (255, 0, 0))\n                cv.polylines(DATA.threshed_images[i], [np.array([upper_left,lower_left,lower_right,upper_right], dtype=np.int32)],\n                    1, (255, 0, 0))\n\n                #keep the blob (contour) for this LED\n                LED[i] = biggest\n                #print biggest\n                #print second_biggest\n        # compute the moments for each of the contours\n        moment0 = cv.moments(LED[0])\n        moment1 = cv.moments(LED[1])\n        #h, w, c = DATA.base_image.shape\n        if (moment0['m00'] > 0):\n            center_x = moment0['m10']/moment0['m00']\n            center_x = map_img_to_real(center_x, DATA.area_width_captured, CFG.AREA_WIDTH_REAL)\n\n            center_y = moment0['m01']/moment0['m00']\n            center_y = map_img_to_real(center_y, DATA.area_height_captured, CFG.AREA_HEIGHT_REAL)\n            DATA.led1_pos = (center_x, center_y)\n        else:\n            DATA.led1_pos = None\n\n        if (moment1['m00'] > 0):\n            second_center_x = moment1['m10']/moment1['m00']\n            second_center_x = map_img_to_real(second_center_x, DATA.area_width_captured, CFG.AREA_WIDTH_REAL)\n\n            second_center_y = moment1['m01']/moment1['m00']\n            second_center_y = map_img_to_real(second_center_y, DATA.area_height_captured, CFG.AREA_HEIGHT_REAL)\n            DATA.led2_pos = (second_center_x, second_center_y)\n        else:\n            DATA.led2_pos = None\n\n\n        #if these blobs have areas > 0, then calculate the average of their centroids\n        if (moment0['m00'] > 0 and moment1['m00'] > 0):\n\n            #led_check = self.check_LED(center_x, center_y, second_center_x, second_center_y)\n            #led_check was inside\n            if (True):\n                DATA.robot_center = ((center_x + second_center_x)/2.0, (center_y + second_center_y)/2.0)\n                h, w, c = DATA.base_image.shape\n                robot_centre_img = map_point_to_img(DATA.robot_center, (h, w), (CFG.AREA_HEIGHT_REAL, CFG.AREA_WIDTH_REAL))\n                led1_pos_img = map_point_to_img(DATA.led1_pos, (h, w), (CFG.AREA_HEIGHT_REAL, CFG.AREA_WIDTH_REAL))\n                led2_pos_img = map_point_to_img(DATA.led2_pos, (h, w), (CFG.AREA_HEIGHT_REAL, CFG.AREA_WIDTH_REAL))\n                cv.circle(DATA.base_image, robot_centre_img, 10, (255, 255, 0))\n                cv.circle(DATA.threshed_images[0], robot_centre_img, 10, (255, 255, 0))\n                DATA.heading = math.atan2(led1_pos_img[0]-led2_pos_img[0], led1_pos_img[1]-led2_pos_img[1]) + -np.pi\n                DATA.heading = -1 * math.atan2(math.sin(DATA.heading), math.cos(DATA.heading))\n                DATA.detected = True\n            else:\n                DATA.robot_center = None\n                DATA.heading = None\n                DATA.detected = False\n\n        else:\n            DATA.robot_center = None\n            DATA.heading = None\n            DATA.detected = False\n\n    def detectAndTrack(self, SETTINGS, DATA, ROBOT):\n        \"\"\" this function organizes all of the processing\n        done for each image from a camera type 2Led robot \"\"\"\n\n        if DATA.base_image is None:\n            raise Exception(\"No base_image provided. 
{->detectAndTrack2LedRobot}\")\n        DATA.processed_image = DATA.base_image\n        #DATA.processed_image = cv.bilateralFilter(DATA.processed_image, 25, 25, 25)\n        \n        if DATA.created_images == False:\n            self.init_images(DATA, SETTINGS)\n            DATA.created_images = True\n        \n        self.threshold_image(DATA, SETTINGS)\n        self.find_2Led(DATA, SETTINGS)\n\n        key_press_raw = cv.waitKey(1) # gets a raw key press\n        key_press = key_press_raw & 0xFF # same as 255# sets all but the low 8 bits to 0\n        \n        # Handle key presses only (255 = \"no key pressed\")\n        if key_press != 255:\n            self.check_key_press(key_press, DATA, SETTINGS)\n\n        h, w, c = DATA.base_image.shape\n        x, y = DATA.target\n        # update the displays:\n        xI = map_real_to_img(x, w, CFG.AREA_WIDTH_REAL)\n        yI = map_real_to_img(y, h, CFG.AREA_HEIGHT_REAL)\n        target = (xI, yI)\n\n        cv.circle(DATA.base_image, target, 3, (255,0,0), 2, -1)\n        #draw_buttons(DATA.base_image)\n        cv.imshow('Tracking and recognition', DATA.base_image) \n        #OnGreenButtonClick(SETTINGS)\n        #OnRedButtonClick(SETTINGS)\n        # Currently selected threshold image:\n        for i in range(len(DATA.threshed_images)):\n            cv.imshow('Threshold_%d' % i, DATA.current_threshold[i])\n        if (DATA.robot_center and DATA.led2_pos) != None:\n            return ROBOT.update(time.clock() - self.time, DATA.robot_center, DATA.led1_pos, DATA.led2_pos, DATA.heading)\n        else: return ROBOT\n\n    # Callback behavior for the buttons, with a loop over the keys that sets the displayed threshold\n    def check_key_press(self, key_press, DATA, SETTINGS):\n\n        SETTINGS.last_key_pressed = key_press\n\n        # if it was ESC, make it 'q'\n        if key_press == 27:\n            key_press = ord('q')\n\n        # if a 'q' or ESC was pressed, we quit\n        if key_press == ord('q'): \n            print(\"Quitting\")\n            return\n\n        # help menu\n        if key_press == ord('h'):\n            print(\" Keyboard Command Menu\")\n            print(\" ==============================\")\n            print(\" q : quit\")\n            print(\" ESC : quit\")\n            print(\" h : help menu\")\n            print(\" w : show total threshold image in threshold window\")\n            print(\" r : show red image in threshold window\")\n            print(\" t : show green image in threshold window\")\n            print(\" y : show blue image in threshold window\")\n            print(\" f : show thresholded red image in threshold window\")\n            print(\" g : show thresholded blue image in threshold window\")\n            print(\" h : show thresholded green image in threshold window\")\n            print(\" a : show hue_image image in threshold window\")\n            print(\" s : show saturation image in threshold window\")\n            print(\" p : draw robot path\")\n            print(\" d : show value image in threshold window\")\n            print(\" z : show thresholded hue_image image in threshold window\")\n            print(\" x : show thresholded saturation image in threshold window\")\n            print(\" c : show thresholded value image in threshold window\")\n            print(\" v : saves threshold values to file\")\n            print(\" b : loads threshold values from pickle file\")\n\n        elif key_press == ord('v'):\n            save_thresholds(SETTINGS.thresholds, CFG.THRESHOLDS_FILE_PATH)\n\n        elif key_press == ord('b'):\n            load_thresholds(SETTINGS.thresholds, CFG.THRESHOLDS_FILE_PATH)\n\n        elif key_press == ord('p'):\n            generate_path_image(DATA)\n        \n        # threshold keypresses:\n        elif key_press in list(DATA.key_dictionary.keys()):\n            DATA.current_threshold = DATA.key_dictionary[key_press]\n    \n", "sub_path": "2Led/trackers/tracker2Led.py", "file_name": "tracker2Led.py", "file_ext": "py", "file_size_in_byte": 18146, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.path.insert", "line_number": 7, 
"usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "time.clock", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 33, "usage_type": "call"}, {"api_name": "config.D.LEFT_LD", "line_number": 40, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 40, "usage_type": "name"}, {"api_name": "numpy.eye", "line_number": 40, "usage_type": "call"}, {"api_name": "config.D.RIGHT_LD", "line_number": 41, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.eye", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 44, "usage_type": "call"}, {"api_name": "config.D.CAMERA_CALIBRATION_UNDISTORT", "line_number": 66, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 66, "usage_type": "name"}, {"api_name": "config.D.CAMERA_CALIBRATION_PATH", "line_number": 67, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 67, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 68, "usage_type": "call"}, {"api_name": "config.D.CAMERA_CALIBRATION_UNDISTORT", "line_number": 104, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 104, "usage_type": "name"}, {"api_name": "cv2.undistort", "line_number": 106, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 137, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 140, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 142, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 144, "usage_type": "call"}, {"api_name": "cv2.multiply", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.multiply", "line_number": 158, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 185, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 203, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 203, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 203, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 222, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 226, "usage_type": "call"}, {"api_name": "cv2.polylines", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 233, "usage_type": "attribute"}, {"api_name": "cv2.polylines", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 235, "usage_type": "attribute"}, {"api_name": "cv2.polylines", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 237, "usage_type": "call"}, 
{"api_name": "numpy.int32", "line_number": 237, "usage_type": "attribute"}, {"api_name": "cv2.moments", "line_number": 245, "usage_type": "call"}, {"api_name": "cv2.moments", "line_number": 246, "usage_type": "call"}, {"api_name": "config.D.AREA_WIDTH_REAL", "line_number": 250, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 250, "usage_type": "name"}, {"api_name": "config.D.AREA_HEIGHT_REAL", "line_number": 253, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 253, "usage_type": "name"}, {"api_name": "config.D.AREA_WIDTH_REAL", "line_number": 260, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 260, "usage_type": "name"}, {"api_name": "config.D.AREA_HEIGHT_REAL", "line_number": 263, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 263, "usage_type": "name"}, {"api_name": "config.D.AREA_HEIGHT_REAL", "line_number": 277, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 277, "usage_type": "name"}, {"api_name": "config.D.AREA_WIDTH_REAL", "line_number": 277, "usage_type": "attribute"}, {"api_name": "config.D.AREA_HEIGHT_REAL", "line_number": 278, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 278, "usage_type": "name"}, {"api_name": "config.D.AREA_WIDTH_REAL", "line_number": 278, "usage_type": "attribute"}, {"api_name": "config.D.AREA_HEIGHT_REAL", "line_number": 279, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 279, "usage_type": "name"}, {"api_name": "config.D.AREA_WIDTH_REAL", "line_number": 279, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 280, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 281, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 282, "usage_type": "attribute"}, {"api_name": "math.atan2", "line_number": 283, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 283, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 283, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 311, "usage_type": "call"}, {"api_name": "config.D.AREA_WIDTH_REAL", "line_number": 321, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 321, "usage_type": "name"}, {"api_name": "config.D.AREA_HEIGHT_REAL", "line_number": 322, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 322, "usage_type": "name"}, {"api_name": "cv2.circle", "line_number": 325, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 327, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 332, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 334, "usage_type": "call"}, {"api_name": "config.D.THRESHOLDS_FILE_PATH", "line_number": 376, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 376, "usage_type": "name"}, {"api_name": "config.D.THRESHOLDS_FILE_PATH", "line_number": 379, "usage_type": "attribute"}, {"api_name": "config.D", "line_number": 379, "usage_type": "name"}]} +{"seq_id": "480504550", "text": "from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.loader import render_to_string\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\n\nfrom us_ignite.actionclusters import mailer\nfrom us_ignite.actionclusters.forms import (\n 
ActionClusterForm,\n ActionClusterLinkFormSet,\n MembershipForm,\n ActionClusterMediaFormSet,\n ActionClusterMembershipFormSet\n)\nfrom us_ignite.actionclusters.models import (\n ActionCluster,\n ActionClusterMembership,\n Domain,\n Year\n)\nfrom us_ignite.awards.models import ActionClusterAward\nfrom us_ignite.common import pagination\nfrom us_ignite.hubs.forms import HubActionClusterMembershipForm\nfrom us_ignite.hubs.models import HubActionClusterMembership\n\n\ndef get_stage_or_404(stage):\n for pk, name in ActionCluster.STAGE_CHOICES:\n if pk == int(stage):\n return (pk, name)\n raise Http404('Invalid stage.')\n\n\ndef actioncluster_list(request, current=True, domain=None, stage=None, year=None, filter_name='', description=''):\n \"\"\"List all the available ``ActionCluster``.\"\"\"\n extra_qs = {\n 'is_approved': True,\n 'status': ActionCluster.PUBLISHED,\n }\n if domain:\n # Validate domain is valid if provided:\n extra_qs['domain'] = get_object_or_404(Domain, slug=domain)\n filter_name = extra_qs['domain'].name\n elif stage:\n # Validate stage is valid if provided:\n pk, name = get_stage_or_404(stage)\n extra_qs['stage'] = pk\n filter_name = name\n elif year:\n extra_qs['year'] = get_object_or_404(Year, year=year)\n filter_name = extra_qs['year'].year\n description = extra_qs['year'].description\n else:\n if current:\n extra_qs['year'] = get_object_or_404(Year, default_year=True)\n filter_name = 'Current Year (' + extra_qs['year'].year + ')'\n description = extra_qs['year'].description\n else:\n extra_qs['year'] = get_object_or_404(Year, default_year=False)\n filter_name = 'Archive 2015'\n\n\n\n\n page_no = pagination.get_page_no(request.GET)\n object_list = (\n ActionCluster.objects.select_related('domain')\n .filter(**extra_qs).order_by('needs_partner'))\n featured_list = (ActionCluster.objects.select_related('domain')\n .filter(is_featured=True, **extra_qs)[:3])\n page = pagination.get_page(object_list, page_no)\n context = {\n 'featured_list': featured_list,\n 'page': page,\n 'domain_list': Domain.objects.all(),\n 'stage_list': ActionCluster.STAGE_CHOICES,\n 'filter_name': filter_name,\n 'description': description,\n 'current_domain': domain,\n 'current_stage': int(stage) if stage else None,\n 'appname': 'actionclusters',\n }\n return TemplateResponse(request, 'actionclusters/object_list.html', context)\n\n\ndef actioncluster_list_partner(request):\n \"\"\"List action clusters looking for a partner.\"\"\"\n extra_qs = {\n 'is_approved': True,\n 'status': ActionCluster.PUBLISHED,\n 'needs_partner': True\n }\n page_no = pagination.get_page_no(request.GET)\n object_list = (\n ActionCluster.objects.select_related('domain').filter(**extra_qs))\n featured_list = (ActionCluster.objects.select_related('domain')\n .filter(is_featured=True, **extra_qs)[:3])\n page = pagination.get_page(object_list, page_no)\n context = {\n 'featured_list': featured_list,\n 'page': page,\n 'domain_list': Domain.objects.all(),\n 'stage_list': ActionCluster.STAGE_CHOICES,\n 'appname': 'actionclusters',\n 'category': 'partner'\n }\n return TemplateResponse(request, 'actionclusters/object_list.html', context)\n\n\ndef actioncluster_list_iot(request):\n \"\"\"List IOT project ideas.\"\"\"\n extra_qs = {\n 'is_approved': False,\n 'status': ActionCluster.PUBLISHED,\n }\n page_no = pagination.get_page_no(request.GET)\n object_list = (\n ActionCluster.objects.select_related('domain').filter(**extra_qs))\n featured_list = (ActionCluster.objects.select_related('domain')\n .filter(is_featured=True, **extra_qs)[:3])\n 
page = pagination.get_page(object_list, page_no)\n context = {\n 'featured_list': featured_list,\n 'page': page,\n 'domain_list': Domain.objects.all(),\n 'stage_list': ActionCluster.STAGE_CHOICES,\n 'appname': 'actionclusters',\n 'category': 'iot'\n }\n return TemplateResponse(request, 'actionclusters/object_list.html', context)\n\n\ndef get_actioncluster_for_user(slug, user):\n \"\"\"Validates the user can access the given app.\"\"\"\n actioncluster = get_object_or_404(ActionCluster.active, slug__exact=slug)\n # Application is published, no need for validation:\n if actioncluster.is_visible_by(user):\n return actioncluster\n raise Http404\n\n\ndef get_award_list(actioncluster):\n \"\"\"Returns the list of awards for an app.\"\"\"\n award_queryset = (ActionClusterAward.objects\n .select_related('award').filter(actioncluster=actioncluster))\n return [a.award for a in award_queryset]\n\n\ndef get_hub_list(actioncluster):\n \"\"\"Returns the list of hubs for an app.\"\"\"\n hub_queryset = actioncluster.hubactionclustermembership_set.select_related('hub').all()\n return [h.hub for h in hub_queryset]\n\n\ndef actioncluster_detail(request, slug):\n actioncluster = get_actioncluster_for_user(slug, request.user)\n related_list = (ActionCluster.active.filter(domain=actioncluster.domain)\n .order_by('?')[:3])\n context = {\n 'object': actioncluster,\n 'domain': actioncluster.domain,\n 'community' : actioncluster.community,\n 'url_list': actioncluster.actionclusterurl_set.all(),\n 'media_list': actioncluster.actionclustermedia_set.all(),\n 'feature_list': actioncluster.features.all(),\n 'member_list': actioncluster.members.select_related('profile').all(),\n 'hub_list': get_hub_list(actioncluster),\n 'related_list': related_list,\n 'award_list': get_award_list(actioncluster),\n 'can_edit': actioncluster.is_editable_by(request.user),\n 'is_owner': actioncluster.is_owned_by(request.user),\n }\n return TemplateResponse(request, 'actionclusters/object_detail.html', context)\n\n\n# @login_required\ndef actioncluster_add(request):\n \"\"\"View for adding an ``Application``.\"\"\"\n if request.method == 'POST':\n form = ActionClusterForm(request.POST, request.FILES)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.owner = request.user\n instance.save()\n form.save_m2m()\n mailer.notify_request(instance)\n messages.success(\n request, 'The action cluster \"%s\" has been added.' 
% instance.name)\n return redirect(instance.get_absolute_url())\n else:\n form = ActionClusterForm()\n context = {\n 'form': form,\n }\n return TemplateResponse(request, 'actionclusters/object_add.html', context)\n\n\n@login_required\ndef actioncluster_edit(request, slug):\n actioncluster = get_object_or_404(ActionCluster.active, slug__exact=slug)\n if not actioncluster.is_editable_by(request.user):\n raise Http404\n if request.method == 'POST':\n form = ActionClusterForm(\n request.POST, request.FILES, instance=actioncluster)\n link_formset = ActionClusterLinkFormSet(\n request.POST, instance=actioncluster)\n image_formset = ActionClusterMediaFormSet(\n request.POST, request.FILES, instance=actioncluster)\n if (form.is_valid() and link_formset.is_valid()\n and image_formset.is_valid()):\n instance = form.save()\n link_formset.save()\n image_formset.save()\n messages.success(\n request, 'The action cluster \"%s\" has been updated.'\n % instance.name)\n return redirect(instance.get_absolute_url())\n else:\n form = ActionClusterForm(instance=actioncluster)\n link_formset = ActionClusterLinkFormSet(instance=actioncluster)\n image_formset = ActionClusterMediaFormSet(instance=actioncluster)\n context = {\n 'object': actioncluster,\n 'form': form,\n 'link_formset': link_formset,\n 'image_formset': image_formset,\n }\n return TemplateResponse(request, 'actionclusters/object_edit.html', context)\n\n\ndef create_member(actioncluster, user):\n \"\"\"Create a new member when one does not exist yet and return it.\"\"\"\n membership, is_new = (ActionClusterMembership.objects\n .get_or_create(actioncluster=actioncluster, user=user))\n return membership if is_new else None\n\n\n@login_required\ndef actioncluster_membership(request, slug):\n \"\"\"Adds collaborators to an action cluster.\"\"\"\n actioncluster = get_object_or_404(\n ActionCluster.active, slug__exact=slug)\n if not actioncluster.is_owned_by(request.user):\n raise Http404\n if request.method == 'POST':\n form = MembershipForm(request.POST)\n formset = ActionClusterMembershipFormSet(\n request.POST, instance=actioncluster)\n if form.is_valid() and formset.is_valid():\n for member in form.cleaned_data['collaborators']:\n create_member(actioncluster, member)\n formset.save()\n messages.success(request, 'Membership successfully updated.')\n return redirect(actioncluster.get_membership_url())\n else:\n form = MembershipForm()\n formset = ActionClusterMembershipFormSet(instance=actioncluster)\n context = {\n 'object': actioncluster,\n 'form': form,\n 'formset': formset,\n }\n return TemplateResponse(\n request, 'actionclusters/object_membership.html', context)\n\n\n@login_required\ndef actioncluster_export(request, slug):\n \"\"\"Generates an export of the current status of the action cluster.\"\"\"\n actioncluster = get_object_or_404(ActionCluster.active, slug__exact=slug)\n if not actioncluster.has_member(request.user):\n raise Http404\n context = {\n 'object': actioncluster,\n 'url_list': actioncluster.actionclusterurl_set.all(),\n 'image_list': actioncluster.actionclustermedia_set.all(),\n 'feature_list': actioncluster.features.all(),\n 'member_list': actioncluster.members.select_related('profile').all(),\n\n }\n content = render_to_string('actionclusters/export.txt', context)\n response = HttpResponse(content, content_type='text/plain')\n filename = '%s-export-%s' % (\n actioncluster.slug, timezone.now().strftime(\"%Y%m%d-%H%M%S\"))\n response['Content-Disposition'] = (\n 'attachment; filename=\"%s.txt\"' % filename)\n response['Content-Length'] = 
len(response.content)\n return response\n\n\ndef _get_membership_form(membership_list):\n id_list = [m.hub.id for m in membership_list]\n args = [{'hubs': id_list}] if id_list else []\n return HubActionClusterMembershipForm(*args)\n\n\ndef _update_membership(actioncluster, hub_list, membership_list):\n # Remove any non-selected hub membership:\n for membership in membership_list:\n if membership.hub not in hub_list:\n membership.delete()\n # Add any new Hub membership:\n new_membership_list = [_add_hub_membership(hub, actioncluster) for hub in hub_list]\n return new_membership_list\n\n\ndef _add_hub_membership(hub, actioncluster):\n \"\"\"Generates the hub membership.\"\"\"\n instance, is_new = HubActionClusterMembership.objects.get_or_create(\n hub=hub, actioncluster=actioncluster)\n # Record the activity for this membership.\n if is_new:\n name = ('Action cluster %s has been registered as part of this '\n 'community.' % actioncluster.name)\n extra_data = {\n 'url': actioncluster.get_absolute_url(),\n 'user': actioncluster.owner,\n }\n hub.record_activity(name, extra_data=extra_data)\n return instance\n\n\n@login_required\ndef actioncluster_hub_membership(request, slug):\n \"\"\"View to manage the membership of an action cluster to a hub.\"\"\"\n actioncluster = get_object_or_404(ActionCluster.active, slug__exact=slug)\n if not actioncluster.is_editable_by(request.user):\n raise Http404\n # Determine existing membership:\n actioncluster_hubs = (actioncluster.hubactionclustermembership_set\n .select_related('hub').all())\n if request.method == 'POST':\n form = HubActionClusterMembershipForm(request.POST)\n if form.is_valid():\n hubs = form.cleaned_data['hubs']\n _update_membership(actioncluster, hubs, actioncluster_hubs)\n msg = 'Hub membership updated.'\n messages.success(request, msg)\n return redirect(actioncluster.get_absolute_url())\n else:\n form = _get_membership_form(actioncluster_hubs)\n context = {\n 'object': actioncluster,\n 'form': form,\n }\n return TemplateResponse(\n request, 'actionclusters/object_hub_membership.html', context)\n", "sub_path": "us_ignite/actionclusters/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 13296, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "us_ignite.actionclusters.models.ActionCluster.STAGE_CHOICES", "line_number": 30, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 30, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 33, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.PUBLISHED", "line_number": 40, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 40, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 44, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.Domain", "line_number": 44, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 52, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.Year", "line_number": 52, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 57, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.Year", "line_number": 57, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 61, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.Year", "line_number": 
61, "usage_type": "argument"}, {"api_name": "us_ignite.common.pagination.get_page_no", "line_number": 67, "usage_type": "call"}, {"api_name": "us_ignite.common.pagination", "line_number": 67, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects.select_related", "line_number": 69, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 69, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects.select_related", "line_number": 71, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 71, "usage_type": "name"}, {"api_name": "us_ignite.common.pagination.get_page", "line_number": 73, "usage_type": "call"}, {"api_name": "us_ignite.common.pagination", "line_number": 73, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.Domain.objects.all", "line_number": 77, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.Domain.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.Domain", "line_number": 77, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.STAGE_CHOICES", "line_number": 78, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 78, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 85, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.PUBLISHED", "line_number": 92, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 92, "usage_type": "name"}, {"api_name": "us_ignite.common.pagination.get_page_no", "line_number": 95, "usage_type": "call"}, {"api_name": "us_ignite.common.pagination", "line_number": 95, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects.select_related", "line_number": 97, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 97, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects.select_related", "line_number": 98, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 98, "usage_type": "name"}, {"api_name": "us_ignite.common.pagination.get_page", "line_number": 100, "usage_type": "call"}, {"api_name": "us_ignite.common.pagination", "line_number": 100, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.Domain.objects.all", "line_number": 104, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.Domain.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.Domain", "line_number": 104, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.STAGE_CHOICES", "line_number": 105, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 105, 
"usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 109, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.PUBLISHED", "line_number": 116, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 116, "usage_type": "name"}, {"api_name": "us_ignite.common.pagination.get_page_no", "line_number": 118, "usage_type": "call"}, {"api_name": "us_ignite.common.pagination", "line_number": 118, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects.select_related", "line_number": 120, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 120, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects.select_related", "line_number": 121, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 121, "usage_type": "name"}, {"api_name": "us_ignite.common.pagination.get_page", "line_number": 123, "usage_type": "call"}, {"api_name": "us_ignite.common.pagination", "line_number": 123, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.Domain.objects.all", "line_number": 127, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.Domain.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.Domain", "line_number": 127, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.STAGE_CHOICES", "line_number": 128, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 128, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 132, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 137, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.active", "line_number": 137, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 137, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 141, "usage_type": "name"}, {"api_name": "us_ignite.awards.models.ActionClusterAward.objects.select_related", "line_number": 146, "usage_type": "call"}, {"api_name": "us_ignite.awards.models.ActionClusterAward.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "us_ignite.awards.models.ActionClusterAward", "line_number": 146, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.active.filter", "line_number": 159, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.active", "line_number": 159, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 159, "usage_type": "name"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 175, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.forms.ActionClusterForm", "line_number": 182, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.mailer.notify_request", "line_number": 188, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.mailer", "line_number": 188, "usage_type": "name"}, 
{"api_name": "django.contrib.messages.success", "line_number": 189, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 189, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 191, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.forms.ActionClusterForm", "line_number": 193, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 197, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 202, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.active", "line_number": 202, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 202, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 204, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.forms.ActionClusterForm", "line_number": 206, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.forms.ActionClusterLinkFormSet", "line_number": 208, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.forms.ActionClusterMediaFormSet", "line_number": 210, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 217, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 217, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 220, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.forms.ActionClusterForm", "line_number": 222, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.forms.ActionClusterLinkFormSet", "line_number": 223, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.forms.ActionClusterMediaFormSet", "line_number": 224, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 231, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 200, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.models.ActionClusterMembership.objects.get_or_create", "line_number": 236, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionClusterMembership.objects", "line_number": 236, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionClusterMembership", "line_number": 236, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 244, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.active", "line_number": 245, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 245, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 247, "usage_type": "name"}, {"api_name": "us_ignite.actionclusters.forms.MembershipForm", "line_number": 249, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.forms.ActionClusterMembershipFormSet", "line_number": 250, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 256, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 256, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 257, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.forms.MembershipForm", "line_number": 259, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.forms.ActionClusterMembershipFormSet", "line_number": 260, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", 
"line_number": 266, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 241, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 273, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.active", "line_number": 273, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 273, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 275, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 284, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 285, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 287, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 287, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 270, "usage_type": "name"}, {"api_name": "us_ignite.hubs.forms.HubActionClusterMembershipForm", "line_number": 297, "usage_type": "call"}, {"api_name": "us_ignite.hubs.models.HubActionClusterMembership.objects.get_or_create", "line_number": 312, "usage_type": "call"}, {"api_name": "us_ignite.hubs.models.HubActionClusterMembership.objects", "line_number": 312, "usage_type": "attribute"}, {"api_name": "us_ignite.hubs.models.HubActionClusterMembership", "line_number": 312, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 329, "usage_type": "call"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster.active", "line_number": 329, "usage_type": "attribute"}, {"api_name": "us_ignite.actionclusters.models.ActionCluster", "line_number": 329, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 331, "usage_type": "name"}, {"api_name": "us_ignite.hubs.forms.HubActionClusterMembershipForm", "line_number": 336, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 341, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 341, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 342, "usage_type": "call"}, {"api_name": "django.template.response.TemplateResponse", "line_number": 349, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 326, "usage_type": "name"}]} +{"seq_id": "24413489", "text": "from flask import Flask, jsonify, render_template, request\nfrom flask_cors import CORS\nimport urllib.request\nimport time\nfrom datetime import timedelta\nimport datetime\nimport threading\n\napp = Flask(__name__, template_folder='webPage')\nCORS(app)\n\nIP_ADDRESSES =\t{\n \"LightAlarm_Bed\": \"http://192.168.1.151\",\n}\n\n\nweekdays = range(1,5)\n\n\ndef alarmWorker():\n while True:\n if (datetime.datetime.now() + timedelta(days=5)).isoweekday() in weekdays:\n print(\"Week Day\")\n else:\n print(\"Weekend\")\n print(\"Hello\")\n time.sleep(1)\n\n\n#t = threading.Thread(target=alarmWorker)\n#t.setDaemon(True)\n#t.start()\n\ndef setAlarmIntensity(intensity, changet):\n urlString = \"{0}/pwm?intensity={1}&changet={2}\".format(IP_ADDRESSES['LightAlarm_Bed'], intensity, changet)\n with urllib.request.urlopen(urlString) as response:\n html = response.read()\n print(html)\n if html.decode('utf-8') == \"OK\":\n return(True)\n else:\n return(False)\n\n\n@app.route(\"/api/pwm\", methods=['GET'])\ndef pwm():\n intensity = int(request.args.get('intensity'))\n changet = 
int(request.args.get('changet'))\n\n print(intensity, changet)\n if intensity is not None:\n if setAlarmIntensity(intensity, changet):\n return(\"OK\")\n else:\n return(\"Failed\")\n\n@app.route('/', methods=['GET'])\ndef hello():\n message = \"Smart Home\"\n return render_template('index.html', message=message)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=\"5000\", threaded=True)\n", "sub_path": "mainServer.py", "file_name": "mainServer.py", "file_ext": "py", "file_size_in_byte": 1560, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 22, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 36, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 36, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "240270155", "text": "import re\nimport hydro_serving_grpc as hs\nimport pickle\nfrom keras.preprocessing import sequence\nwith open('/model/files/tokenizer.pickle', 'rb') as f:\n tokenizer = pickle.load(f)\n\nmax_features = 20000\nmaxlen = 100\ndef tokenize(text):\n sentence = [s.decode() for s in text.string_val][0]  # avoid shadowing the built-in str\n\n if 'www.' 
in sentence or 'http:' in sentence or 'https:' in sentence or '.com' in sentence:\n sentence = re.sub(r\"([^ ]+(?<=\\.[a-z]{3}))\", \"\", sentence)\n\n tok_sentence = tokenizer.texts_to_sequences([sentence])\n pad_sentence = sequence.pad_sequences(tok_sentence, maxlen=maxlen)[0]\n tok_tensor = hs.TensorProto(\n int64_val = pad_sentence,\n dtype=hs.DT_INT64,\n tensor_shape=hs.TensorShapeProto(dim=[hs.TensorShapeProto.Dim(size=100)]))\n return hs.PredictResponse(outputs={'tokenized': tok_tensor})\n\n", "sub_path": "examples/text_classification/models/tokenizer/src/func_main.py", "file_name": "func_main.py", "file_ext": "py", "file_size_in_byte": 845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pickle.load", "line_number": 6, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 17, "usage_type": "name"}, {"api_name": "hydro_serving_grpc.TensorProto", "line_number": 18, "usage_type": "call"}, {"api_name": "hydro_serving_grpc.DT_INT64", "line_number": 20, "usage_type": "attribute"}, {"api_name": "hydro_serving_grpc.TensorShapeProto", "line_number": 21, "usage_type": "call"}, {"api_name": "hydro_serving_grpc.TensorShapeProto.Dim", "line_number": 21, "usage_type": "call"}, {"api_name": "hydro_serving_grpc.PredictResponse", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "508397370", "text": "\"\"\" Tests for seed_services_cli.stage_based_messaging \"\"\"\n\nfrom unittest import TestCase\nfrom click.testing import CliRunner\nfrom seed_services_cli.main import cli\n\n\nclass TestStageBasedMessagingCommands(TestCase):\n def setUp(self):\n self.runner = CliRunner()\n\n def tearDown(self):\n pass\n\n def test_schedule_list_help(self):\n result = self.runner.invoke(cli, ['sbm-schedules', '--help'])\n self.assertEqual(result.exit_code, 0)\n self.assertTrue(\n \"List all schedules\"\n in result.output)\n\n def test_messageset_list_help(self):\n result = self.runner.invoke(cli, ['sbm-messagesets', '--help'])\n self.assertEqual(result.exit_code, 0)\n self.assertTrue(\n \"List all messagesets\"\n in result.output)\n\n def test_message_list_help(self):\n result = self.runner.invoke(cli, ['sbm-messages', '--help'])\n self.assertEqual(result.exit_code, 0)\n self.assertTrue(\n \"List all messages\"\n in result.output)\n\n def test_message_delete_help(self):\n result = self.runner.invoke(cli, ['sbm-messages-delete', '--help'])\n self.assertEqual(result.exit_code, 0)\n self.assertTrue(\n \"Delete all messages matching filter\"\n in result.output)\n\n def test_messages_import_help(self):\n result = self.runner.invoke(cli, ['sbm-messages-import', '--help'])\n self.assertEqual(result.exit_code, 0)\n self.assertTrue(\n \"Import to the Stage Based Messaging service.\"\n in result.output)\n", "sub_path": "seed_services_cli/tests/test_stage_based_messaging.py", "file_name": "test_stage_based_messaging.py", "file_ext": "py", "file_size_in_byte": 1589, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "click.testing.CliRunner", "line_number": 10, "usage_type": "call"}, {"api_name": "seed_services_cli.main.cli", "line_number": 16, "usage_type": "argument"}, {"api_name": "seed_services_cli.main.cli", 
"line_number": 23, "usage_type": "argument"}, {"api_name": "seed_services_cli.main.cli", "line_number": 30, "usage_type": "argument"}, {"api_name": "seed_services_cli.main.cli", "line_number": 37, "usage_type": "argument"}, {"api_name": "seed_services_cli.main.cli", "line_number": 44, "usage_type": "argument"}]} +{"seq_id": "605291716", "text": "import sys\n\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\n\nimport fft\n\nN = 256\nM = N\n\nX,Y = np.mgrid[-N/2:N/2,-M/2:M/2]\n\nV = np.cos(2*np.pi*X/N) + np.sin(2*np.pi*Y/M)\nQ1 = fft.fft(V)\nP1 = fft.ifft(Q1)\n\nax = plt.subplot(2, 2, 1)\nax.imshow(V, interpolation='nearest')\nax = plt.subplot(2, 2, 2)\nax.imshow(np.real(P1), interpolation='nearest')\nax = plt.subplot(2, 2, 3)\nax.imshow(np.real(Q1), interpolation='nearest')\nax = plt.subplot(2, 2, 4)\nax.imshow(np.imag(Q1), interpolation='nearest')\nplt.show()\n\nQ2 = np.fft.fft(V)\nP2 = np.fft.ifft(Q2)\n\nplt.plot(X, V, '-b', X, np.real(Q2/np.sqrt(N)), '-g', X, np.real(P2), '-r')\n#plt.show()\n#print 'Q2:', Q2\n#print 'P2:', P2\n\nmax_dev = np.max(np.abs(P1-P2))\n#print 'maximum deviation:', max_dev\n\n\n", "sub_path": "fft/test_fft2.py", "file_name": "test_fft2.py", "file_ext": "py", "file_size_in_byte": 746, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.mgrid", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 14, "usage_type": "call"}, {"api_name": "fft.fft", "line_number": 15, "usage_type": "call"}, {"api_name": "fft.ifft", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.real", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.real", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.imag", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.fft.fft", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.fft.ifft", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 29, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.real", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "344091357", "text": "import tkinter as tk\nfrom 
tkinter import ttk, scrolledtext\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport mysql.connector as mysql\n\nconex= mysql.connect(host='localhost',user='root',passwd='',db='Webs')\nop= conex.cursor(buffered=True)\n\ndef busca(pag):\n html= urlopen(pag)\n res= tk.Tk()\n res.title(\"Enlaces\")\n res.tk_setPalette('light gray')\n print(\"\\nExtraer los enlaces de la página web: \"+pag+\"\\n\")\n try:\n op.execute('INSERT INTO Enlaces values(\"'+pag+'\", False)')\n except:\n pass\n op.execute('SELECT * from Enlaces where status=0')\n conex.commit()\n for pag, trsh in op:\n print(\"\\nRevisando \"+pag+\"\\n\")\n try:\n html= urlopen(pag)\n bs= BeautifulSoup(html,'html.parser')\n for link in bs.findAll('a'):\n print('href: {}'.format(link.get('href')))\n try:\n op.execute('INSERT INTO Enlaces values(\"'+link.get('href')+'\", False)')\n except:\n print(\"Valor Duplicado\")\n except:\n print(\"No se pudo acceder a \"+pag+\"\\n\")\n op.execute(\"UPDATE Enlaces SET status=1 WHERE pagina='\"+pag+\"'\")\n conex.commit()\n print(\"\\nTerminó de Revisar Enlaces de \"+pag)\n op.execute(\"SELECT * from Enlaces where status=0\")\n print(\"Revisión de enlaces Finalizada\")\n conex.commit()\n conex.close()\n\nventana= tk.Tk()\nventana.title(\"Web Scraping\")\nttk.Label(ventana,text=\"Ingrese una página web: \").pack()\npag= tk.StringVar()\nttk.Entry(ventana,width=50,textvariable=pag).pack()\nttk.Button(ventana,text=\"Iniciar\",command=lambda: busca(pag.get())).pack()\n\nventana.mainloop()", "sub_path": "Práctica 3.py", "file_name": "Práctica 3.py", "file_ext": "py", "file_size_in_byte": 1690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "mysql.connector.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "mysql.connector", "line_number": 7, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 11, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 25, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 26, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 43, "usage_type": "call"}, {"api_name": "tkinter.ttk.Label", "line_number": 45, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 45, "usage_type": "name"}, {"api_name": "tkinter.StringVar", "line_number": 46, "usage_type": "call"}, {"api_name": "tkinter.ttk.Entry", "line_number": 47, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 47, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "424396717", "text": "\"\"\"\n@Project : text-classification-cnn-rnn\n@Module : concurrent_futures_as_completed.py\n@Author : Deco [deco@cubee.com]\n@Created : 7/10/18 6:12 PM\n@Desc : \n\"\"\"\nimport spacy\nimport time\nfrom concurrent import futures\nimport tqdm\n\n\ndef pipeline_tagger_parser_ner(cls, st):\n nlp = cls()\n # 2. initialise it\n pipeline = ['tagger', 'parser', 'ner']\n for name in pipeline:\n component = nlp.create_pipe(name)\n # 3. create the pipeline components\n nlp.add_pipe(component)\n # 4. add the component to the pipeline\n model_data_path = ('/home/deco/miniconda2/envs/tf17/lib/python3.6/'\n 'site-packages/en_core_web_md/en_core_web_md-2.0.0')\n nlp.from_disk(model_data_path)\n # 5. 
load in the binary data\n\n doc = nlp.make_doc(st)\n print('tokens in pipeline_tagger_parser_ner:')\n print([token.text for token in doc])\n return 'tagger_parser_ner'\n\n\ndef pipeline_tokenizer(cls, st):\n nlp = cls()\n model_data_path = ('/home/deco/miniconda2/envs/tf17/lib/python3.6/'\n 'site-packages/en_core_web_md/en_core_web_md-2.0.0')\n nlp.from_disk(model_data_path)\n\n doc = nlp.make_doc(st)\n print('tokens in pipeline_tokenizer:')\n print([token.text for token in doc])\n return 'tokenizer'\n\n\ndef serial_func(cls, st):\n for func in [pipeline_tagger_parser_ner, pipeline_tokenizer]:\n func(cls, st)\n\n\ndef func4(cls, st):\n time.sleep(1)\n print('func4', st)\n\n\ndef parallel_func(cls, st):\n func_list = [serial_func, pipeline_tagger_parser_ner,\n pipeline_tokenizer, func4]\n # with futures.ProcessPoolExecutor() as executor:\n with futures.ProcessPoolExecutor(max_workers=2) as executor:\n to_do = []\n for func in func_list:\n future = executor.submit(func, cls, st)\n to_do.append(future)\n # time.sleep(0.001)\n msg = 'Scheduled for {}: {}'\n print(msg.format(func.__name__, future))\n\n time.sleep(2)\n print('to do list:', to_do)\n\n results = []\n print('nonblocking')\n start = time.perf_counter()\n done_iter = futures.as_completed(to_do)\n done_iter = tqdm.tqdm(done_iter, total=len(func_list))\n # for future in futures.as_completed(to_do, timeout=100):\n for future in done_iter:\n # asyncronization\n print(time.perf_counter()-start)\n print('blocking')\n res = future.result()\n print('nonblocking because future.result() is already finished')\n msg = '{} result: {}'\n print(msg.format(future, res))\n results.append(res)\n print('to do list:', to_do)\n\n print('Is this part blocked?')\n\n return len(results)\n\n\nif __name__ == '__main__':\n\n lang0 = 'en'\n cls0 = spacy.util.get_lang_class(lang0)\n st0 = 'This is a sentence'\n\n parallel_func(cls0, st0)\n\n print('finished.')\n", "sub_path": "nlp_models/tqdm/concurrent_futures_as_completed.py", "file_name": "concurrent_futures_as_completed.py", "file_ext": "py", "file_size_in_byte": 2948, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "concurrent.futures.ProcessPoolExecutor", "line_number": 60, "usage_type": "call"}, {"api_name": "concurrent.futures", "line_number": 60, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 74, "usage_type": "call"}, {"api_name": "concurrent.futures.as_completed", "line_number": 75, "usage_type": "call"}, {"api_name": "concurrent.futures", "line_number": 75, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 76, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 80, "usage_type": "call"}, {"api_name": "spacy.util.get_lang_class", "line_number": 97, "usage_type": "call"}, {"api_name": "spacy.util", "line_number": 97, "usage_type": "attribute"}]} +{"seq_id": "220012140", "text": "\"\"\"analysis table\n\nRevision ID: 3ebf1ffd2db2\nRevises: 7ef1676fb46f\nCreate Date: 2021-05-29 07:37:48.744313\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '3ebf1ffd2db2'\ndown_revision = '7ef1676fb46f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('analysis',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('event_id', sa.Integer(), nullable=False),\n sa.Column('data', postgresql.JSONB(astext_type=sa.Text()), nullable=True),\n sa.ForeignKeyConstraint(['event_id'], ['event.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('analysis')\n # ### end Alembic commands ###\n", "sub_path": "storage/alembic/versions/3ebf1ffd2db2_analysis_table.py", "file_name": "3ebf1ffd2db2_analysis_table.py", "file_ext": "py", "file_size_in_byte": 926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.JSONB", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlalchemy.Text", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "110862938", "text": "#\n# Copyright (c) 2011-2016, Hortonworks Inc. 
All rights reserved.\n#\n# Except as expressly permitted in a written agreement between your\n# company and Hortonworks, Inc, any use, reproduction, modification,\n# redistribution, sharing, lending or other exploitation of all or\n# any part of the contents of this file is strictly prohibited.\n#\n\nimport logging\nfrom beaver.seleniumDP import SeleniumDP\nfrom selenium.webdriver.common.by import By\nfrom beaver.component.hdfsUI.basePage import BasePage\nfrom taskreporter.taskreporter import TaskReporter\n\nlogger = logging.getLogger(__name__)\nlogging.getLogger(\"requests\").setLevel(logging.ERROR)\nlogging.getLogger(\"urllib3\").setLevel(logging.ERROR)\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nformatter = logging.Formatter('%(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\nclass NNUIPage(BasePage):\n def __init__(self, basePageObj):\n BasePage.__init__(self, driver=basePageObj.driver, base_url=basePageObj.base_url)\n\n locator_dictionary = {\n \"overviewTab\": (By.XPATH, \"//*[@id='ui-tabs']/li/a[contains(.,'Overview')]\"),\n \"overviewContainer\": (By.ID, \"tab-overview\"),\n \"overviewTabNNHostPortStatus\": (By.XPATH, \"//*[@id='tab-overview']/div[1]/h1/small\"),\n \"datanodesTab\": (By.XPATH, \"//*[@id='ui-tabs']/li/a[contains(.,'Datanodes')]\"),\n \"datanodeContainer\": (By.ID, \"tab-datanode\"),\n \"firstDatanodeLink\": (By.XPATH, \"//*[@id='table-datanodes']/tbody/tr[1]/td[2]/a\"),\n \"secondDatanodeLink\": (By.XPATH, \"//*[@id='table-datanodes']/tbody/tr[2]/td[2]/a\"),\n \"datanodesVolumeFailuresTab\": (By.XPATH, \"//*[@id='ui-tabs']/li/a[contains(.,'Datanode Volume Failures')]\"),\n \"datanodeVolumeFailureContainer\": (By.ID, \"tab-datanode-volume-failures\"),\n \"snapshotTab\": (By.XPATH, \"//*[@id='ui-tabs']/li/a[contains(.,'Snapshot')]\"),\n \"snapshotContainer\": (By.ID, \"tab-snapshot\"),\n \"startupProgressTab\": (By.XPATH, \"//*[@id='ui-tabs']/li/a[contains(.,'Startup Progress')]\"),\n \"startupProgressContainer\": (By.ID, \"tab-startup-progress\"),\n \"utilitiesDropdown\": (By.XPATH, \"//*[@id='ui-tabs']/li/a[contains(.,'Utilities')]\"),\n \"utilitiesDropdownBrowseFS\": (By.XPATH, \"//*[@id='ui-tabs']/li/ul/li/a[contains(.,'Browse the file system')]\"),\n \"utilitiesDropdownLogs\": (By.XPATH, \"//*[@id='ui-tabs']/li/ul/li/a[contains(.,'Logs')]\"),\n \"utilitiesDropdownMetrics\": (By.XPATH, \"//*[@id='ui-tabs']/li/ul/li/a[contains(.,'Metrics')]\"),\n \"utilitiesDropdownConfiguration\": (By.XPATH, \"//*[@id='ui-tabs']/li/ul/li/a[contains(.,'Configuration')]\"),\n \"utilitiesDropdownProcessThreadDump\": (\n By.XPATH, \"//*[@id='ui-tabs']/li/ul/li/a[contains(.,'Process Thread Dump')]\"\n ),\n \"browseDirectoryHeader\": (By.XPATH, \"//div[@class='page-header']//h1[contains(.,'Browse Directory')]\"),\n \"logsHeader\": (By.XPATH, \"/html/body/h1[contains(.,'Directory: /logs/')]\"),\n \"logsHeaderError\": (By.XPATH, \"/html/body/h2[contains(.,'HTTP ERROR: 403')]\"),\n \"metricsJmx\": (By.XPATH, \"/html/body/pre[contains(.,'beans')]\"),\n \"conf\": (By.XPATH, \"/configuration\"),\n \"processThreadDump\": (By.XPATH, \"/html/body/pre[contains(.,'Process Thread Dump')]\"),\n }\n\n def isNNUIPage(self, retryCount=0, timeout=None, restartWebDriver=False, quitWebdriver=False):\n return self.checkElementonPage(\n locatorName=self.getOverviewTab(returnLocatorName=True),\n locatorMessage='NN UI Page',\n retryCount=retryCount,\n timeout=timeout,\n restartWebDriver=restartWebDriver,\n quitWebdriver=quitWebdriver\n )\n\n def getOverviewTab(self, 
returnLocatorName=False):\n return self.getElement('overviewTab', returnLocatorName)\n\n def getOverviewContainer(self, returnLocatorName=False):\n return self.getElement('overviewContainer', returnLocatorName)\n\n def getOverviewTabNNHostPortStatus(self, returnLocatorName=False):\n return self.getElement('overviewTabNNHostPortStatus', returnLocatorName)\n\n def getOverviewTabNNHostPortStatusText(self):\n return self.getOverviewTabNNHostPortStatus().text.lower()\n\n def getDatanodesTab(self, returnLocatorName=False):\n return self.getElement('datanodesTab', returnLocatorName)\n\n def getDatanodeContainer(self, returnLocatorName=False):\n return self.getElement('datanodeContainer', returnLocatorName)\n\n def getFirstDatanodeLink(self, returnLocatorName=False):\n return self.getElement('firstDatanodeLink', returnLocatorName)\n\n def getSecondDatanodeLink(self, returnLocatorName=False):\n return self.getElement('secondDatanodeLink', returnLocatorName)\n\n def getDatanodesVolumeFailuresTab(self, returnLocatorName=False):\n return self.getElement('datanodesVolumeFailuresTab', returnLocatorName)\n\n def getDatanodeVolumeFailureContainer(self, returnLocatorName=False):\n return self.getElement('datanodeVolumeFailureContainer', returnLocatorName)\n\n def getSnapshotTab(self, returnLocatorName=False):\n return self.getElement('snapshotTab', returnLocatorName)\n\n def getSnapshotContainer(self, returnLocatorName=False):\n return self.getElement('snapshotContainer', returnLocatorName)\n\n def getStartupProgressTab(self, returnLocatorName=False):\n return self.getElement('startupProgressTab', returnLocatorName)\n\n def getStartupProgressContainer(self, returnLocatorName=False):\n return self.getElement('startupProgressContainer', returnLocatorName)\n\n def getUtilitiesDropdown(self, returnLocatorName=False):\n return self.getElement('utilitiesDropdown', returnLocatorName)\n\n def getUtilitiesDropdownBrowseFS(self, returnLocatorName=False):\n return self.getElement('utilitiesDropdownBrowseFS', returnLocatorName)\n\n def getUtilitiesDropdownLogs(self, returnLocatorName=False):\n return self.getElement('utilitiesDropdownLogs', returnLocatorName)\n\n def getUtilitiesDropdownMetrics(self, returnLocatorName=False):\n return self.getElement('utilitiesDropdownMetrics', returnLocatorName)\n\n def getUtilitiesDropdownConfiguration(self, returnLocatorName=False):\n return self.getElement('utilitiesDropdownConfiguration', returnLocatorName)\n\n def getUtilitiesDropdownProcessThreadDump(self, returnLocatorName=False):\n return self.getElement('utilitiesDropdownProcessThreadDump', returnLocatorName)\n\n def getBrowseDirectoryHeader(self, returnLocatorName=False):\n return self.getElement('browseDirectoryHeader', returnLocatorName)\n\n def getLogsHeader(self, returnLocatorName=False):\n return self.getElement('logsHeader', returnLocatorName)\n\n def getLogsHeaderError(self, returnLocatorName=False):\n return self.getElement('logsHeaderError', returnLocatorName)\n\n def getMetricsJmx(self, returnLocatorName=False):\n return self.getElement('metricsJmx', returnLocatorName)\n\n def getConf(self, returnLocatorName=False):\n return self.getElement('conf', returnLocatorName)\n\n def getProcessThreadDump(self, returnLocatorName=False):\n return self.getElement('processThreadDump', returnLocatorName)\n\n def clickOverviewTab(self):\n SeleniumDP.click(self.driver, self.getOverviewTab())\n\n def clickDatanodesTab(self):\n SeleniumDP.click(self.driver, self.getDatanodesTab())\n\n def clickFirstDatanodeLink(self):\n 
SeleniumDP.click(self.driver, self.getFirstDatanodeLink())\n\n def clickSecondDatanodeLink(self):\n SeleniumDP.click(self.driver, self.getSecondDatanodeLink())\n\n def clickDatanodesVolumeFailuresTab(self):\n SeleniumDP.click(self.driver, self.getDatanodesVolumeFailuresTab())\n\n def clickSnapshotTab(self):\n SeleniumDP.click(self.driver, self.getSnapshotTab())\n\n def clickStartupProgressTab(self):\n SeleniumDP.click(self.driver, self.getStartupProgressTab())\n\n def clickUtilitiesDropdown(self):\n SeleniumDP.click(self.driver, self.getUtilitiesDropdown())\n\n def clickUtilitiesDropdownBrowseFS(self):\n SeleniumDP.click(self.driver, self.getUtilitiesDropdownBrowseFS())\n\n def clickUtilitiesDropdownLogs(self):\n SeleniumDP.click(self.driver, self.getUtilitiesDropdownLogs())\n\n def clickUtilitiesDropdownMetrics(self):\n SeleniumDP.click(self.driver, self.getUtilitiesDropdownMetrics())\n\n def clickUtilitiesDropdownConfiguration(self):\n SeleniumDP.click(self.driver, self.getUtilitiesDropdownConfiguration())\n\n def clickUtilitiesDropdownProcessThreadDump(self):\n SeleniumDP.click(self.driver, self.getUtilitiesDropdownProcessThreadDump())\n\n @TaskReporter.report_test()\n def gotoOverviewTab(self):\n self.checkElementonPage(\n locatorName=self.getOverviewTab(returnLocatorName=True),\n locatorMessage='Overview Tab',\n retryCount=4,\n timeout=0.5\n )\n self.clickOverviewTab()\n assert self.checkElementonPage(\n locatorName=self.getOverviewContainer(returnLocatorName=True),\n locatorMessage='Overview Container',\n retryCount=4,\n timeout=0.5\n )\n\n @TaskReporter.report_test()\n def gotoDatanodesTab(self):\n # Due to intermittent failures added a static wait here.\n import time\n time.sleep(5)\n self.checkElementonPage(\n locatorName=self.getDatanodesTab(returnLocatorName=True),\n locatorMessage='Datanodes Tab',\n retryCount=4,\n timeout=0.5\n )\n self.clickDatanodesTab()\n assert self.checkElementonPage(\n locatorName=self.getDatanodeContainer(returnLocatorName=True),\n locatorMessage='Datanodes Container',\n retryCount=4,\n timeout=0.5\n )\n\n @TaskReporter.report_test()\n def gotoDatanodeUI(self):\n self.gotoDatanodesTab()\n self.clickFirstDatanodeLink()\n\n @TaskReporter.report_test()\n def gotoDatanodesVolumeFailuresTab(self):\n self.checkElementonPage(\n locatorName=self.getDatanodesVolumeFailuresTab(returnLocatorName=True),\n locatorMessage='Datanodes Volume Failures Tab',\n retryCount=4,\n timeout=0.5\n )\n self.clickDatanodesVolumeFailuresTab()\n assert self.checkElementonPage(\n locatorName=self.getDatanodeVolumeFailureContainer(returnLocatorName=True),\n locatorMessage='Datanodes Volume Failures Container',\n retryCount=4,\n timeout=0.5\n )\n\n @TaskReporter.report_test()\n def gotoSnapshotTab(self):\n self.checkElementonPage(\n locatorName=self.getSnapshotTab(returnLocatorName=True),\n locatorMessage='Snapshot Tab',\n retryCount=4,\n timeout=0.5\n )\n self.clickSnapshotTab()\n assert self.checkElementonPage(\n locatorName=self.getSnapshotContainer(returnLocatorName=True),\n locatorMessage='Snapshot Container',\n retryCount=4,\n timeout=0.5\n )\n\n @TaskReporter.report_test()\n def gotoStartupProgressTab(self):\n self.checkElementonPage(\n locatorName=self.getStartupProgressTab(returnLocatorName=True),\n locatorMessage='Startup Progress Tab',\n retryCount=4,\n timeout=0.5\n )\n self.clickStartupProgressTab()\n assert self.checkElementonPage(\n locatorName=self.getStartupProgressContainer(returnLocatorName=True),\n locatorMessage='Startup Progress Container',\n retryCount=4,\n 
timeout=0.5\n )\n\n @TaskReporter.report_test()\n def gotoUtilitiesBrowseFS(self):\n self.checkElementonPage(\n locatorName=self.getUtilitiesDropdown(returnLocatorName=True),\n locatorMessage='Utilities Dropdown',\n retryCount=4,\n timeout=0.5\n )\n self.clickUtilitiesDropdown()\n self.checkElementonPage(\n locatorName=self.getUtilitiesDropdownBrowseFS(returnLocatorName=True),\n locatorMessage='Utilities Dropdown BrowseFS',\n retryCount=4,\n timeout=0.5\n )\n self.clickUtilitiesDropdownBrowseFS()\n assert self.checkElementonPage(\n locatorName=self.getBrowseDirectoryHeader(returnLocatorName=True),\n locatorMessage='Browse Directory Container',\n retryCount=4,\n timeout=5\n )\n\n @TaskReporter.report_test()\n def gotoUtilitiesLogs(self, errorExcepted=False):\n self.checkElementonPage(\n locatorName=self.getUtilitiesDropdown(returnLocatorName=True),\n locatorMessage='Utilities Dropdown',\n retryCount=4,\n timeout=0.5\n )\n self.clickUtilitiesDropdown()\n self.checkElementonPage(\n locatorName=self.getUtilitiesDropdownLogs(returnLocatorName=True),\n locatorMessage='Utilities Dropdown Logs',\n retryCount=4,\n timeout=0.5\n )\n self.clickUtilitiesDropdownLogs()\n if errorExcepted:\n assert self.checkElementonPage(\n locatorName=self.getLogsHeaderError(returnLocatorName=True),\n locatorMessage='Logs Error',\n retryCount=4,\n timeout=5\n )\n else:\n assert self.checkElementonPage(\n locatorName=self.getLogsHeader(returnLocatorName=True),\n locatorMessage='Logs ',\n retryCount=4,\n timeout=5\n )\n\n @TaskReporter.report_test()\n def gotoUtilitiesMetrics(self):\n self.checkElementonPage(\n locatorName=self.getUtilitiesDropdown(returnLocatorName=True),\n locatorMessage='Utilities Dropdown',\n retryCount=4,\n timeout=0.5\n )\n self.clickUtilitiesDropdown()\n self.checkElementonPage(\n locatorName=self.getUtilitiesDropdownMetrics(returnLocatorName=True),\n locatorMessage='Utilities Dropdown Metrics',\n retryCount=4,\n timeout=0.5\n )\n self.clickUtilitiesDropdownMetrics()\n assert self.checkElementonPage(\n locatorName=self.getMetricsJmx(returnLocatorName=True),\n locatorMessage='Metrics JMX',\n retryCount=4,\n timeout=5\n )\n\n @TaskReporter.report_test()\n def gotoUtilitiesConfiguration(self):\n self.checkElementonPage(\n locatorName=self.getUtilitiesDropdown(returnLocatorName=True),\n locatorMessage='Utilities Dropdown',\n retryCount=4,\n timeout=0.5\n )\n self.clickUtilitiesDropdown()\n self.checkElementonPage(\n locatorName=self.getUtilitiesDropdownConfiguration(returnLocatorName=True),\n locatorMessage='Utilities Dropdown Configuration',\n retryCount=4,\n timeout=0.5\n )\n self.clickUtilitiesDropdownConfiguration()\n assert self.checkElementonPage(\n locatorName=self.getConf(returnLocatorName=True), locatorMessage='Configuration', retryCount=4, timeout=5\n )\n\n @TaskReporter.report_test()\n def gotoUtilitiesProcessThreadDump(self):\n self.checkElementonPage(\n locatorName=self.getUtilitiesDropdown(returnLocatorName=True),\n locatorMessage='Utilities Dropdown',\n retryCount=4,\n timeout=0.5\n )\n self.clickUtilitiesDropdown()\n self.checkElementonPage(\n locatorName=self.getUtilitiesDropdownProcessThreadDump(returnLocatorName=True),\n locatorMessage='Utilities Dropdown Process Thread Dump',\n retryCount=4,\n timeout=0.5\n )\n self.clickUtilitiesDropdownProcessThreadDump()\n assert self.checkElementonPage(\n locatorName=self.getProcessThreadDump(returnLocatorName=True),\n locatorMessage='Process Thread Dump',\n retryCount=4,\n timeout=5\n )\n", "sub_path": "beaver/component/hdfsUI/nnUIPage.py", 
"file_name": "nnUIPage.py", "file_ext": "py", "file_size_in_byte": 16117, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 21, "usage_type": "call"}, {"api_name": "beaver.component.hdfsUI.basePage.BasePage", "line_number": 26, "usage_type": "name"}, {"api_name": "beaver.component.hdfsUI.basePage.BasePage.__init__", "line_number": 28, "usage_type": "call"}, {"api_name": "beaver.component.hdfsUI.basePage.BasePage", "line_number": 28, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 31, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 31, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 32, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 32, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 33, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 33, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 34, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 34, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 35, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 35, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 36, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 36, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 37, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 37, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 38, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 40, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 40, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 41, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 41, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 42, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 42, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 43, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 43, 
"usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 44, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 44, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 45, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 45, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 46, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 46, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 47, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 47, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 48, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 48, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 50, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 52, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 52, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 53, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 53, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 54, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 54, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 55, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 55, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 56, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 56, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 57, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 57, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 149, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 149, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 152, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 152, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 155, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 155, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 158, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 158, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 161, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 161, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 164, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 164, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 167, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", 
"line_number": 167, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 170, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 170, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 173, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 173, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 176, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 176, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 179, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 179, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 182, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 182, "usage_type": "name"}, {"api_name": "beaver.seleniumDP.SeleniumDP.click", "line_number": 185, "usage_type": "call"}, {"api_name": "beaver.seleniumDP.SeleniumDP", "line_number": 185, "usage_type": "name"}, {"api_name": "taskreporter.taskreporter.TaskReporter.report_test", "line_number": 187, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter", "line_number": 187, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 207, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter.report_test", "line_number": 203, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter", "line_number": 203, "usage_type": "name"}, {"api_name": "taskreporter.taskreporter.TaskReporter.report_test", "line_number": 222, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter", "line_number": 222, "usage_type": "name"}, {"api_name": "taskreporter.taskreporter.TaskReporter.report_test", "line_number": 227, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter", "line_number": 227, "usage_type": "name"}, {"api_name": "taskreporter.taskreporter.TaskReporter.report_test", "line_number": 243, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter", "line_number": 243, "usage_type": "name"}, {"api_name": "taskreporter.taskreporter.TaskReporter.report_test", "line_number": 259, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter", "line_number": 259, "usage_type": "name"}, {"api_name": "taskreporter.taskreporter.TaskReporter.report_test", "line_number": 275, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter", "line_number": 275, "usage_type": "name"}, {"api_name": "taskreporter.taskreporter.TaskReporter.report_test", "line_number": 298, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter", "line_number": 298, "usage_type": "name"}, {"api_name": "taskreporter.taskreporter.TaskReporter.report_test", "line_number": 329, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter", "line_number": 329, "usage_type": "name"}, {"api_name": "taskreporter.taskreporter.TaskReporter.report_test", "line_number": 352, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter", "line_number": 352, "usage_type": "name"}, {"api_name": "taskreporter.taskreporter.TaskReporter.report_test", "line_number": 372, "usage_type": "call"}, {"api_name": "taskreporter.taskreporter.TaskReporter", "line_number": 372, "usage_type": "name"}]} +{"seq_id": "593382464", "text": "from django.shortcuts 
import render\nfrom django.views import View\nfrom django.conf import settings\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.http import Http404\nfrom django.contrib.auth.decorators import login_required, permission_required\n\nfrom .models import Collection\nfrom .forms import CollectionForm, EditCollectionForm\nfrom .serializers import CollectionSerializer\nfrom utils import restful\nfrom apps.article.serializers import ArticleSerializer\nfrom apps.article.models import Article\n\n\n\n\n@login_required\n@require_GET\ndef index(request):\n count = settings.ONE_PAGE_NEWS_COUNT\n collections = Collection.objects.all()[0:count]\n articles = Article.objects.filter(author=request.user).all()\n context = {\n 'collections': collections,\n 'articles': articles,\n }\n return render(request, 'collection/collections.html', context=context)\n\n@login_required\n@require_GET\ndef detail(request, collection_id):\n try:\n count = settings.ONE_PAGE_NEWS_COUNT\n collection = Collection.objects.prefetch_related('articles').get(pk=collection_id)\n context = {\n 'collection': collection,\n 'articles': collection.articles.all()[0:count]\n }\n return render(request, 'collection/collections-detail.html', context=context)\n except Collection.DoesNotExist:\n raise Http404\n\n@login_required\n@require_GET\ndef article_list(request, collection_id):\n page = int(request.GET.get('p', 1))\n # A collection id of 0 means no filtering is applied\n collection_uid = request.GET.get('collection_id', 0)\n if collection_id != collection_uid:\n return restful.params_error(message=\"URL error!\")\n start = (page - 1) * settings.ONE_PAGE_NEWS_COUNT\n end = start + settings.ONE_PAGE_NEWS_COUNT\n if collection_uid == 0:\n articles = Article.objects.select_related('category', 'author').all()[start:end]\n else:\n articles = Article.objects.select_related('category', 'author').filter(collection__uid=collection_uid)[start:end]\n serializers = ArticleSerializer(articles, many=True)\n # {'id':1, 'title':'abc', category:{'id':1,'name':'hot topic'}}\n data = serializers.data\n for d in data:\n tags = Article.objects.get(pk=d['uid']).tags.names()\n d['tags'] = list(tags)\n return restful.result(data=data)\n\n@login_required\n@require_POST\ndef new_collection(request):\n form = CollectionForm(data=request.POST)\n if form.is_valid():\n title = form.cleaned_data.get('title')\n desc = form.cleaned_data.get('desc')\n thumbnail = form.cleaned_data.get('thumbnail')\n tags = form.cleaned_data.get('tags')\n # Collection.objects.create(title=title, desc=desc, thumbnail=thumbnail, tags=tags)\n new_collection = form.save(commit=False)\n new_collection.save()\n form.save_m2m() # save the tags\n return restful.ok()\n else:\n return restful.params_error(message=form.get_errors())\n\n@require_GET\ndef collection_list(request):\n page = int(request.GET.get('p', 1))\n start = (page - 1) * settings.ONE_PAGE_NEWS_COUNT\n end = start + settings.ONE_PAGE_NEWS_COUNT\n collections = Collection.objects.all()[start:end]\n serializers = CollectionSerializer(collections, many=True)\n data = serializers.data\n return restful.result(data=data)\n\n@login_required\n@require_POST\ndef edit_collection(request):\n form = EditCollectionForm(request.POST)\n if form.is_valid():\n collection_id = form.cleaned_data.get('collection_id')\n collection = Collection.objects.get(pk=collection_id)\n\n title = form.cleaned_data.get('title')\n desc = form.cleaned_data.get('desc')\n thumbnail = form.cleaned_data.get('thumbnail')\n tags = form.cleaned_data.get(\"tags\")\n\n collection.title = title\n 
collection.desc = desc\n collection.thumbnail = thumbnail\n collection.save()\n collection.tags.set(*tags, clear=True)\n return restful.ok()\n else:\n return restful.params_error(message=form.get_errors())\n\n\n@require_POST\ndef delete_collection(request):\n collection_uid = request.POST.get('collection_id')\n try:\n Collection.objects.filter(pk=collection_uid).delete()\n except Collection.DoesNotExist:\n return restful.params_error(message=\"The collection to delete does not exist\")\n\n return restful.ok()\n\n@require_POST\ndef add_article(request):\n articles_id_list = request.POST.getlist(\"articles_selected\")\n collection_id = request.POST.get(\"collection_id\")\n collection = Collection.objects.get(pk=collection_id)\n for article_id in articles_id_list:\n Article.objects.filter(pk=article_id).update(collection=collection)\n return restful.ok()\n\ndef remove_article(request):\n pass", "sub_path": "apps/collection/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.conf.settings.ONE_PAGE_NEWS_COUNT", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 21, "usage_type": "name"}, {"api_name": "models.Collection.objects.all", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Collection.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.Collection", "line_number": 22, "usage_type": "name"}, {"api_name": "apps.article.models.Article.objects.filter", "line_number": 23, "usage_type": "call"}, {"api_name": "apps.article.models.Article.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "apps.article.models.Article", "line_number": 23, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 18, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_GET", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.settings.ONE_PAGE_NEWS_COUNT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 34, "usage_type": "name"}, {"api_name": "models.Collection.objects.prefetch_related", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Collection.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Collection", "line_number": 35, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Collection.DoesNotExist", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.Collection", "line_number": 41, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 42, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 30, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_GET", "line_number": 31, "usage_type": "name"}, {"api_name": "utils.restful.params_error", "line_number": 51, "usage_type": "call"}, {"api_name": "utils.restful", "line_number": 51, "usage_type": "name"}, {"api_name": "django.conf.settings.ONE_PAGE_NEWS_COUNT", "line_number": 52, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 52, "usage_type": "name"}, {"api_name": "django.conf.settings.ONE_PAGE_NEWS_COUNT", "line_number": 53, "usage_type": "attribute"}, {"api_name": 
"django.conf.settings", "line_number": 53, "usage_type": "name"}, {"api_name": "apps.article.models.Article.objects.select_related", "line_number": 55, "usage_type": "call"}, {"api_name": "apps.article.models.Article.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "apps.article.models.Article", "line_number": 55, "usage_type": "name"}, {"api_name": "apps.article.models.Article.objects.select_related", "line_number": 57, "usage_type": "call"}, {"api_name": "apps.article.models.Article.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "apps.article.models.Article", "line_number": 57, "usage_type": "name"}, {"api_name": "apps.article.serializers.ArticleSerializer", "line_number": 58, "usage_type": "call"}, {"api_name": "serializers.data", "line_number": 60, "usage_type": "attribute"}, {"api_name": "apps.article.models.Article.objects.get", "line_number": 62, "usage_type": "call"}, {"api_name": "apps.article.models.Article.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "apps.article.models.Article", "line_number": 62, "usage_type": "name"}, {"api_name": "utils.restful.result", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.restful", "line_number": 64, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 44, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_GET", "line_number": 45, "usage_type": "name"}, {"api_name": "forms.CollectionForm", "line_number": 69, "usage_type": "call"}, {"api_name": "utils.restful.ok", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.restful", "line_number": 79, "usage_type": "name"}, {"api_name": "utils.restful.params_error", "line_number": 81, "usage_type": "call"}, {"api_name": "utils.restful", "line_number": 81, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 66, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 67, "usage_type": "name"}, {"api_name": "django.conf.settings.ONE_PAGE_NEWS_COUNT", "line_number": 86, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 86, "usage_type": "name"}, {"api_name": "django.conf.settings.ONE_PAGE_NEWS_COUNT", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 87, "usage_type": "name"}, {"api_name": "models.Collection.objects.all", "line_number": 88, "usage_type": "call"}, {"api_name": "models.Collection.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "models.Collection", "line_number": 88, "usage_type": "name"}, {"api_name": "serializers.CollectionSerializer", "line_number": 89, "usage_type": "call"}, {"api_name": "serializers.data", "line_number": 90, "usage_type": "attribute"}, {"api_name": "utils.restful.result", "line_number": 91, "usage_type": "call"}, {"api_name": "utils.restful", "line_number": 91, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_GET", "line_number": 83, "usage_type": "name"}, {"api_name": "forms.EditCollectionForm", "line_number": 96, "usage_type": "call"}, {"api_name": "models.Collection.objects.get", "line_number": 99, "usage_type": "call"}, {"api_name": "models.Collection.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "models.Collection", "line_number": 99, "usage_type": "name"}, {"api_name": "utils.restful.ok", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.restful", 
"line_number": 111, "usage_type": "name"}, {"api_name": "utils.restful.params_error", "line_number": 113, "usage_type": "call"}, {"api_name": "utils.restful", "line_number": 113, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 93, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 94, "usage_type": "name"}, {"api_name": "models.Collection.objects.filter", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Collection.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "models.Collection", "line_number": 120, "usage_type": "name"}, {"api_name": "models.Collection.DoesNotExist", "line_number": 121, "usage_type": "attribute"}, {"api_name": "models.Collection", "line_number": 121, "usage_type": "name"}, {"api_name": "utils.restful.params_error", "line_number": 122, "usage_type": "call"}, {"api_name": "utils.restful", "line_number": 122, "usage_type": "name"}, {"api_name": "utils.restful.ok", "line_number": 124, "usage_type": "call"}, {"api_name": "utils.restful", "line_number": 124, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 116, "usage_type": "name"}, {"api_name": "models.Collection.objects.get", "line_number": 130, "usage_type": "call"}, {"api_name": "models.Collection.objects", "line_number": 130, "usage_type": "attribute"}, {"api_name": "models.Collection", "line_number": 130, "usage_type": "name"}, {"api_name": "apps.article.models.Article.objects.filter", "line_number": 132, "usage_type": "call"}, {"api_name": "apps.article.models.Article.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "apps.article.models.Article", "line_number": 132, "usage_type": "name"}, {"api_name": "utils.restful.ok", "line_number": 133, "usage_type": "call"}, {"api_name": "utils.restful", "line_number": 133, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 126, "usage_type": "name"}]} +{"seq_id": "176825906", "text": "# -*- coding: utf-8 -*-\nimport os\nimport msgfmt\nimport setuptools\nfrom setuptools.command.bdist_egg import bdist_egg\nfrom distutils.command.build import build\n\nAPPNAME = 'jeeves'\n\n\nclass jeeves_bdist_egg(bdist_egg):\n def run(self):\n self.run_command('build_i18n')\n setuptools.command.bdist_egg.bdist_egg.run(self)\n\n\nclass jeeves_build_i18n(setuptools.Command):\n description = 'compile PO translations to MO files'\n\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n for root, _, filenames in os.walk(os.path.dirname(__file__)):\n for po_filename in filenames:\n filename, ext = os.path.splitext(po_filename)\n if ext != '.po':\n continue\n path = os.path.join(root, filename)\n po_path = os.extsep.join([path, 'po'])\n mo_path = os.extsep.join([path, 'mo'])\n print('compile %s -> %s' % (po_path, mo_path))\n with open(mo_path, 'wb') as f:\n f.write(msgfmt.Msgfmt(po_path).get())\n\n\nclass jeeves_build(build):\n sub_commands = build.sub_commands + [\n ('build_i18n', None)\n ]\n\n\nsetuptools.setup(\n name='jeeves-pa',\n version='0.1.0',\n url='http://github.com/narfman0/jeeves/',\n license='MIT',\n\n author='Jon Robison',\n author_email='narfman0@gmail.com',\n\n description=(\n 'Jeeves is an open source platform for developing ' +\n 'always-on, voice-controlled applications.'\n ),\n\n install_requires=[\n 'APScheduler',\n 'argparse',\n 'mad',\n 'python-slugify',\n 'pytz',\n 
'PyYAML',\n 'requests',\n ],\n\n packages=[APPNAME],\n package_data={\n APPNAME: [\n 'data/audio/*.wav',\n 'data/locale/*.po',\n 'data/locale/*.mo',\n 'data/standard_phrases/*.txt',\n '../plugins/*/*/*.py',\n '../plugins/*/*/plugin.info',\n '../plugins/*/*/*.txt',\n '../plugins/*/*/locale/*.po',\n '../plugins/*/*/locale/*.mo',\n '../plugins/*/*/tests/*.py'\n ]\n },\n\n data_files=[\n ('share/doc/%s' % APPNAME, [\n 'LICENSE.md',\n 'README.rst'\n ])\n ],\n\n entry_points={\n 'console_scripts': [\n 'jeeves = %s:main' % APPNAME,\n 'jeeves-populate = %s.populate:run' % APPNAME,\n ],\n },\n\n cmdclass={\n 'bdist_egg': jeeves_bdist_egg,\n 'build': jeeves_build,\n 'build_i18n': jeeves_build_i18n,\n },\n\n test_suite='tests',\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "setuptools.command.bdist_egg.bdist_egg", "line_number": 11, "usage_type": "name"}, {"api_name": "setuptools.command.bdist_egg.bdist_egg.run", "line_number": 14, "usage_type": "call"}, {"api_name": "setuptools.command", "line_number": 14, "usage_type": "attribute"}, {"api_name": "setuptools.Command", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.extsep.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.extsep", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.extsep.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.extsep", "line_number": 36, "usage_type": "attribute"}, {"api_name": "msgfmt.Msgfmt", "line_number": 39, "usage_type": "call"}, {"api_name": "distutils.command.build.build", "line_number": 42, "usage_type": "name"}, {"api_name": "distutils.command.build.build.sub_commands", "line_number": 43, "usage_type": "attribute"}, {"api_name": "distutils.command.build.build", "line_number": 43, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "589651394", "text": "import logging\nfrom datetime import datetime\n\nfrom django.conf import settings\nfrom django.http import Http404, JsonResponse\nfrom django.views.generic import View\n\nfrom kombu.utils.url import _parse_url as parse_redis_url\nfrom redis import ConnectionError as RedisConnectionError, ResponseError as RedisResponseError, StrictRedis\n\nlog = logging.getLogger(__name__)\n\nUP = \"up\"\nDOWN = \"down\"\nNO_CONFIG = \"no config found\"\nHTTP_OK = 200\nSERVICE_UNAVAILABLE = 503\nTIMEOUT_SECONDS = 5\n\n\ndef get_redis_info():\n for conf_name in ('REDIS_URL',):\n if hasattr(settings, conf_name):\n url = getattr(settings, conf_name)\n if url.startswith('redis://'):\n break\n else:\n log.error(\"No redis connection info found in settings.\")\n return {\"status\": NO_CONFIG}\n _, host, port, _, password, database, _ = parse_redis_url(url)\n\n start = datetime.now()\n try:\n rdb = StrictRedis(\n host=host, port=port, db=database,\n password=password, socket_timeout=TIMEOUT_SECONDS,\n )\n 
info = rdb.info()\n except (RedisConnectionError, TypeError) as ex:\n log.error(\"Error making Redis connection: %s\", ex.args)\n return {\"status\": DOWN}\n except RedisResponseError as ex:\n log.error(\"Bad Redis response: %s\", ex.args)\n return {\"status\": DOWN, \"message\": \"auth error\"}\n micro = (datetime.now() - start).microseconds\n del rdb # the redis package does not support Redis's QUIT.\n ret = {\n \"status\": UP, \"response_microseconds\": micro,\n }\n fields = (\"uptime_in_seconds\", \"used_memory\", \"used_memory_peak\")\n ret.update({x: info[x] for x in fields})\n return ret\n\n\nclass HealthStatusView(View):\n\n def get(self, request):\n\n token = request.GET.get(\"token\", \"\")\n if not token or token != settings.HEALTH_TOKEN:\n raise Http404()\n\n info = {}\n check_mapping = {\n 'REDIS': (get_redis_info, 'redis'),\n }\n\n for setting, (check_fn, key) in check_mapping.items():\n if setting in settings.HEALTH_CHECK:\n log.debug(f'getting: {key}')\n info[key] = check_fn()\n log.debug(f'{key} done')\n\n code = HTTP_OK\n status_all = UP\n for key in info:\n if info[key][\"status\"] == DOWN:\n code = SERVICE_UNAVAILABLE\n status_all = DOWN\n break\n\n info[\"status_all\"] = status_all\n\n resp = JsonResponse(info)\n resp.status_code = code\n return resp\n", "sub_path": "apps/health_status/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 24, "usage_type": "argument"}, {"api_name": "kombu.utils.url._parse_url", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "redis.StrictRedis", "line_number": 34, "usage_type": "call"}, {"api_name": "redis.ConnectionError", "line_number": 39, "usage_type": "name"}, {"api_name": "redis.ResponseError", "line_number": 42, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 55, "usage_type": "name"}, {"api_name": "django.conf.settings.HEALTH_TOKEN", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 60, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 61, "usage_type": "call"}, {"api_name": "django.conf.settings.HEALTH_CHECK", "line_number": 69, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 69, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "146674422", "text": "#File name: Exercise2_10\n#Author: Pekka Lehtola\n#Description: Clock with alarm function.\n\n\n#Importing time for local tima, keyboard for setting up alarm and playsound\n#To playback alarm sound\nimport time\nimport keyboard\nfrom playsound import playsound\n\nclass Clock:\n\n def __init__(self):\n\n #Hour, minute and seconds set as local time. 
Alarm turned initially\n #off with alarm time set to 00:00.\n self.hour = int(time.strftime(\"%H\"))\n self.minute = int(time.strftime(\"%M\"))\n self.second = int(time.strftime(\"%S\"))\n self.alarm_time_hour = 0\n self.alarm_time_minute = 0\n self.alarm_set = False\n self.alarm_sound = False\n\n #Used for updating the clock's time.\n def update_clock(self):\n\n self.hour = int(time.strftime(\"%H\"))\n self.minute = int(time.strftime(\"%M\"))\n self.second = int(time.strftime(\"%S\"))\n\n #Takes time input from the user and sets it as the alarm's time.\n #Prints out confirmation that the alarm was set to the user's input.\n #Try used for avoiding a crash if a ValueError occurs\n def setting_alarm(self):\n\n try:\n self.alarm_time_hour = int(input(\"Set up the alarm hour: \"))\n self.alarm_time_minute = int(input(\"Set up the alarm minute: \"))\n self.alarm_set = True\n print(\"Alarm set to: \", str(self.alarm_time_hour) + \":\" + str(self.alarm_time_minute))\n\n except:\n print(\"Value error\", end=\"\\n\\n\")\n print(\"Time is: \", str(clock.hour) + \":\" + str(clock.minute))\n\n #Sets alarm back to initial values.\n def turning_alarm_off(self):\n\n self.__init__()\n print(\"Alarm turned off at\", str(clock.hour) + \":\" + str(clock.minute))\n\nclock = Clock()\n\n#Printing out the starting time and instructions for setting the alarm.\nprint(\"To set up the alarm press SPACE. Hold down SPACE to end alarm.\", end=\"\\n\\n\")\nprint(\"Time is: \", str(clock.hour) + \":\" + str(clock.minute))\n\n\n#Main function\ndef main():\n\n while True:\n\n #Detects if space is pressed. Depending on whether the alarm is set,\n #updates the alarm status.\n if keyboard.is_pressed(\"space\"):\n\n if not clock.alarm_set:\n\n clock.setting_alarm()\n\n else:\n clock.turning_alarm_off()\n time.sleep(1)\n\n # If the alarm is triggered, plays the alarm sound and prints out a cheerful\n # message. time.sleep used to keep the alarm from getting annoying.\n if clock.alarm_sound:\n print(\"It's time to wake up!\")\n playsound(\"alarm.mp3\")\n time.sleep(0.4)\n\n #Checks if it's alarm time.\n if int(clock.alarm_time_hour) == (clock.hour):\n if int(clock.alarm_time_minute) == clock.minute:\n clock.alarm_sound = True\n\n #Checks if local time has changed; if yes, updates the clock.\n #Prints out the clock every minute.\n if clock.minute != int(time.strftime(\"%M\")):\n clock.update_clock()\n print(\"Time is: \", str(clock.hour) + \":\" + str(clock.minute))\n\nmain()\n", "sub_path": "OOP/Exercise2/Exercise2_10.py", "file_name": "Exercise2_10.py", "file_ext": "py", "file_size_in_byte": 3030, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "time.strftime", "line_number": 18, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 19, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 20, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 29, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 30, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 31, "usage_type": "call"}, {"api_name": "keyboard.is_pressed", "line_number": 68, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "playsound.playsound", "line_number": 82, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 83, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "560752917", "text": "\r\n\r\nimport pandas as pd\r\n\r\nimport streamlit as st\r\nimport tweepy\r\nfrom PIL 
import Image\r\nfrom textblob import TextBlob\r\nimport plotly.express as px\r\nimport re  # needed for the regex-based tweet cleaning below\r\n\r\n#Calculating Percentage\r\n\r\ndef percentage(part, whole):\r\n temp = 100 * float(part) / float(whole)\r\n return format(temp, '.2f')\r\n\r\n# clean tweets\r\n\r\ndef cleantwt(text):\r\n text = re.sub(r'@[A-Za-z0-9]+', '', text)\r\n text = re.sub(r'#', '', text)\r\n text = re.sub(r'RT[\\s]+', '', text)\r\n text = re.sub(r'https:?\\/\\/\\S+', '', text)\r\n\r\n return text\r\n\r\n#Title For Web Based Application\r\n\r\nst.title('*Twitter Sentiments Analysis*')\r\nst.subheader(\"*Sentiments Analysis of tweets using Tweepy and TextBlob*\")\r\n\r\n# Import Image\r\n\r\nimage = Image.open('C:/Users/CODER/Downloads/R-project-sentiment-analysis.jpg')\r\nst.image(image, caption='Sentiments', use_column_width=True)\r\n\r\n# Twitter API Credentials\r\n\r\nconsumerKey = ''\r\nconsumerSecret = ''\r\nKey = ''\r\nSecret = ''\r\n\r\n# Authenticating\r\n\r\na = tweepy.OAuthHandler(consumerKey, consumerSecret)\r\na.set_access_token(Key, Secret)\r\napi = tweepy.API(a)\r\n\r\n# input for tweet to be searched\r\n\r\nSearchTweets = st.sidebar.text_input('Enter the keyword/hashtag to search about','Donald')\r\nNoOfTweets = st.sidebar.slider('Number of tweets',10,10000,150)\r\n\r\n# Getting Tweets With The Help Of Tweepy\r\n\r\nTweets = tweepy.Cursor(api.search, q=SearchTweets, lang=\"en\").items(NoOfTweets)\r\n\r\n# creating some sentiment variables\r\npolarity = 0\r\npositive = 0\r\nwpositive = 0\r\nspositive = 0\r\nnegative = 0\r\nwnegative = 0\r\nsnegative = 0\r\nneutral = 0\r\n\r\n\r\n# Clean Tweets By calling CleanTweet Function\r\n\r\nTweets = [cleantwt(tweet.text) for tweet in Tweets]\r\n\r\n# Sentiments Statistics \r\n\r\nfor tweet in Tweets:\r\n \r\n analysis = TextBlob(tweet)\r\n\r\n polarity += analysis.sentiment.polarity # adding up polarities to find the average later\r\n\r\n if (analysis.sentiment.polarity == 0): # adding reaction of how people are reacting to find average later\r\n neutral += 1\r\n elif (analysis.sentiment.polarity > 0 and analysis.sentiment.polarity <= 0.3):\r\n wpositive += 1\r\n elif (analysis.sentiment.polarity > 0.3 and analysis.sentiment.polarity <= 0.6):\r\n positive += 1\r\n elif (analysis.sentiment.polarity > 0.6 and analysis.sentiment.polarity <= 1):\r\n spositive += 1\r\n elif (analysis.sentiment.polarity > -0.3 and analysis.sentiment.polarity <= 0):\r\n wnegative += 1\r\n elif (analysis.sentiment.polarity > -0.6 and analysis.sentiment.polarity <= -0.3):\r\n negative += 1\r\n elif (analysis.sentiment.polarity > -1 and analysis.sentiment.polarity <= -0.6):\r\n snegative += 1\r\n\r\n# Calculating Percentage By Calling Percentage function\r\n\r\npositive = percentage(positive, NoOfTweets)\r\nwpositive = percentage(wpositive, NoOfTweets)\r\nspositive = percentage(spositive, NoOfTweets)\r\nnegative = percentage(negative, NoOfTweets)\r\nwnegative = percentage(wnegative, NoOfTweets)\r\nsnegative = percentage(snegative, NoOfTweets)\r\nneutral = percentage(neutral, NoOfTweets)\r\n\r\n#putting up Data Together\r\n\r\ndata = {'Sentiment':['Positive','Neutral','Negative','Weaklypositive','StronglyPositive','WeaklyNegative','StronglyNegative'],\r\n 'Result':[positive, neutral, negative, wpositive, spositive, wnegative, snegative] # values ordered to match the Sentiment labels above\r\n }\r\n \r\n# Create DataFrame using Pandas\r\ndf = pd.DataFrame(data)\r\n\r\n\r\n# Checking Sentiments With The Help Of TextBlob\r\n\r\nsentiment_objects = [TextBlob(tweet) for tweet in Tweets]\r\nsentiment_objects[0].polarity, sentiment_objects[0]\r\n\r\nsentiment_values = 
[[str(tweet), tweet.sentiment.polarity] for tweet in sentiment_objects]\r\nsentiment_values[0]\r\n\r\n\r\n# creating DataFrame using Pandas\r\n\r\nsentiment_df = pd.DataFrame(sentiment_values, columns=[\"tweet\", \"polarity\"])\r\nst.subheader(\"Tweets: \")\r\nst.dataframe(sentiment_df,width=1080)\r\n\r\n\r\n# Printing Statistics\r\n\r\n\r\nst.subheader(\"How people are reacting on \" + SearchTweets + \" by analyzing \" + str(NoOfTweets) + \" tweets.\")\r\nfig = px.pie(df, values='Result', names='Sentiment', title=\" \")\r\nst.plotly_chart(fig)\r\nst.subheader(\"\"\"Detailed Report: \"\"\")\r\nst.table(df)", "sub_path": "sentimental analysis.py", "file_name": "sentimental analysis.py", "file_ext": "py", "file_size_in_byte": 4124, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "streamlit.title", "line_number": 29, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 30, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 34, "usage_type": "name"}, {"api_name": "streamlit.image", "line_number": 35, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 46, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 48, "usage_type": "call"}, {"api_name": "streamlit.sidebar.text_input", "line_number": 52, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 52, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.slider", "line_number": 53, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tweepy.Cursor", "line_number": 57, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 114, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 119, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 128, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 129, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 130, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 136, "usage_type": "call"}, {"api_name": "plotly.express.pie", "line_number": 137, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 137, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 138, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 139, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "56374323", "text": "import unittest\nfrom nose.tools import istest, raises\nfrom .sources import hipersonica as sources\nfrom ....parser import NotValidReviewException\nfrom ....parser.hipersonicaparser import HipersonicaParser as Parser\n\n\nclass HipersonicaParserTest(unittest.TestCase):\n @istest\n def returns_valid_reviews_list_from_reviews_list(self):\n reviews = Parser.fetch_url_reviews(sources.REVIEWS_LIST_HTML)\n expected = [\n u'http://www.hipersonica.com/criticas/' +\n 'pj-harvey-to-bring-you-my-love-y' +\n '-el-punk-se-ahogo-bajo-el-agua',\n u'http://www.hipersonica.com/criticas/' +\n 'umberto-confrontations-electronica-' +\n 'progresiva-para-flotar-en-la-carretera',\n u'http://www.hipersonica.com/criticas/' +\n 'thee-oh-sees-floating-coffin-ya-juegan' +\n '-en-la-liga-de-los-mas-grandes',\n 
u'http://www.hipersonica.com/criticas/' +\n 'hela-broken-cross-doom-metal-para-paladares' +\n '-metaleros-y-no-tan-metaleros',\n u'http://www.hipersonica.com/criticas/' +\n 'inter-arma-sky-burial-el-placer-de-perderse-en' +\n '-un-universo-de-sufrimiento-infinito',\n u'http://www.hipersonica.com/criticas/' +\n 'bastille-bad-blood-quien-mucho-abarca-poco-aprieta',\n u'http://www.hipersonica.com/criticas/' +\n 'orchid-the-mouths-of-madness-un-must-have-para-doomsters',\n u'http://www.hipersonica.com/criticas/' +\n 'golden-grrrls-golden-grrrls-el-inefable-indie-pop' +\n '-llamando-a-tu-puerta',\n u'http://www.hipersonica.com/criticas/' +\n 'smog-rain-on-lens-2001-el-cruzacables-profesional',\n u'http://www.hipersonica.com/criticas/' +\n 'ghost-infestissumam-lo-importante-no-es-ser-el-' +\n 'primero-sino-aparentar-serlo'\n ]\n self.assertEquals(expected, reviews)\n\n @istest\n @raises(NotValidReviewException)\n def raises_not_valid_review_exception(self):\n Parser.get_review_info(sources.REVIEW_INVALID_ALBUM_HTML)\n\n @istest\n def returns_valid_information_for_burial_truantroughsleeper(self):\n information = Parser.get_review_info(\n sources.REVIEW_ALBUM_HTML)\n expected = {\n \"artist\": u\"Burial\",\n \"album\": u\"Truant/Rough Sleeper EP\",\n \"rating\": 9.5,\n \"rating_max\": 10,\n }\n self.assertDictContainsSubset(information, expected)\n", "sub_path": "website/app/tests/unit/parser/test_hipersonicaparser.py", "file_name": "test_hipersonicaparser.py", "file_ext": "py", "file_size_in_byte": 2522, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "parser.hipersonicaparser.HipersonicaParser.fetch_url_reviews", "line_number": 11, "usage_type": "call"}, {"api_name": "parser.hipersonicaparser.HipersonicaParser", "line_number": 11, "usage_type": "name"}, {"api_name": "sources.hipersonica.REVIEWS_LIST_HTML", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sources.hipersonica", "line_number": 11, "usage_type": "name"}, {"api_name": "nose.tools.istest", "line_number": 9, "usage_type": "name"}, {"api_name": "parser.hipersonicaparser.HipersonicaParser.get_review_info", "line_number": 46, "usage_type": "call"}, {"api_name": "parser.hipersonicaparser.HipersonicaParser", "line_number": 46, "usage_type": "name"}, {"api_name": "sources.hipersonica.REVIEW_INVALID_ALBUM_HTML", "line_number": 46, "usage_type": "attribute"}, {"api_name": "sources.hipersonica", "line_number": 46, "usage_type": "name"}, {"api_name": "nose.tools.istest", "line_number": 43, "usage_type": "name"}, {"api_name": "nose.tools.raises", "line_number": 44, "usage_type": "call"}, {"api_name": "parser.NotValidReviewException", "line_number": 44, "usage_type": "argument"}, {"api_name": "parser.hipersonicaparser.HipersonicaParser.get_review_info", "line_number": 50, "usage_type": "call"}, {"api_name": "parser.hipersonicaparser.HipersonicaParser", "line_number": 50, "usage_type": "name"}, {"api_name": "sources.hipersonica.REVIEW_ALBUM_HTML", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sources.hipersonica", "line_number": 51, "usage_type": "name"}, {"api_name": "nose.tools.istest", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "471723247", "text": "\"\"\"\nSummary of ``centerline_sidewalk_coverage.py``\n----------------------------------------------\n\nThis script evaluates a centerline dataset against\na sidewalk dataset.\n\nUsing a search distance of 
25 meters, it identifies\nthe sum of the lengths of all sidewalk segments that\n - 1) is line_type=1\n - 2) intersection is more than 25 meters or the segment itself is less than 25 meters\n\n\nAnother approach compared the angle of the centerline against\nthe angle of each sidewalk geometry. This worked really well in\ngrid-shaped areas, but didn't fare as well with curved/irregular features.\n\n\"\"\"\n\nfrom tqdm import tqdm\n\nfrom postgis_helpers import PostgreSQL\nfrom sidewalk_gaps import CREDENTIALS\n\ndatabase_name = \"sidewalk_gaps\"\n\n\ndef classify_centerlines(\n db: PostgreSQL,\n schema: str,\n tbl: str,\n new_col: str = \"sidewalk\"\n):\n\n # Get a list of all centerlines we want to iterate over.\n oid_query = f\"\"\"\n SELECT objectid FROM {schema}.{tbl}\n \"\"\"\n\n # But first... check if the new_col exists\n # If so, iterate over null features\n # Otherwise, make the column and operate on the entire dataset\n\n column_already_existed = new_col in db.table_columns_as_list(tbl, schema=schema)\n\n if column_already_existed:\n print(\"Picking up where last left off...\")\n oid_query += f\"\"\"\n WHERE {new_col} IS NULL\n \"\"\"\n else:\n print(\"Analyzing for the first time...\")\n db.table_add_or_nullify_column(tbl, new_col, \"FLOAT\", schema=schema)\n\n # Hit the database\n oid_list = db.query_as_list(oid_query)\n\n # pop the results out of tuples into a simple list\n oid_list = [x[0] for x in oid_list]\n\n query_template = f\"\"\"\n SELECT\n SUM(\n ST_LENGTH(\n ST_INTERSECTION(sw.geom, (SELECT ST_BUFFER(c.geom,25)))\n )\n )\n FROM\n {schema}.sidewalks sw, {schema}.centerlines c\n where\n c.objectid = OID_PLACEHOLDER\n AND\n ST_INTERSECTS(sw.geom, (SELECT ST_BUFFER(c.geom,25)))\n AND\n sw.line_type = 1\n AND\n (\n ST_LENGTH(\n ST_INTERSECTION(sw.geom, (SELECT ST_BUFFER(c.geom,25)))\n ) > 25\n OR ST_LENGTH(sw.geom) <= 25\n )\n \"\"\"\n for oid in tqdm(oid_list, total=len(oid_list)):\n oid_query = query_template.replace(\"OID_PLACEHOLDER\", str(oid))\n\n sidwalk_length_in_meters = db.query_as_single_item(oid_query)\n\n if not sidwalk_length_in_meters:\n sidwalk_length_in_meters = 0\n\n update_query = f\"\"\"\n UPDATE {schema}.{tbl} c\n SET {new_col} = {sidwalk_length_in_meters}\n WHERE objectid = {oid}\n \"\"\"\n db.execute(update_query)\n\n\nif __name__ == \"__main__\":\n pass\n", "sub_path": "sidewalk_gaps/segments/centerline_sidewalk_coverage.py", "file_name": "centerline_sidewalk_coverage.py", "file_ext": "py", "file_size_in_byte": 2890, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "postgis_helpers.PostgreSQL", "line_number": 29, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "307084095", "text": "from setuptools import setup, find_packages\nfrom setuptools import Command\nimport sys, os\n\nclass PyTest(Command):\n user_options = []\n def initialize_options(self):\n pass\n def finalize_options(self):\n pass\n def run(self):\n import sys,subprocess\n errno = subprocess.call([sys.executable, 'runtests.py'])\n raise SystemExit(errno)\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\nCHANGES = open(os.path.join(here, 'CHANGES.txt')).read()\n\nsetup(name='kotti_contactform',\n version= '0.1.1a2',\n description=\"Simple contact form for Kotti sites\",\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: 
Python\",\n \"Framework :: Pylons\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: BSD License\",\n ],\n keywords='kotti contact form',\n maintainer='Christian Neumann',\n maintainer_email='cneumann@datenkarussell.de',\n url='http://pypi.python.org/pypi/kotti_contactform',\n license='BSD License',\n packages=['kotti_contactform'],\n package_data={'kotti_contactform': ['templates/*.pt',\n 'locale/*.*',\n 'locale/*/LC_MESSAGES/*.*']},\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'Kotti >= 0.6.0b1',\n 'Babel',\n 'alembic',\n ],\n entry_points=\"\"\"\n # -*- Entry points: -*-\n \"\"\",\n message_extractors = { \"kotti_contactform\": [\n (\"**.py\", \"lingua_python\", None ),\n (\"**.pt\", \"lingua_xml\", None ),\n ]},\n cmdclass = {'test': PyTest},\n )\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1917, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "setuptools.Command", "line_number": 5, "usage_type": "name"}, {"api_name": "subprocess.call", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "358517524", "text": "# -*- coding: utf-8 -*-\n\nfrom PyQt5.QtCore import QStandardPaths, QFile, QTextStream\nfrom PyQt5.QtPrintSupport import QPrinter, QPrintDialog, QPrintPreviewDialog\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QIcon, QPainter\nfrom PyQt5 import uic\nimport sys\n\n# Подключение модуля\n#from mwindow import *\n\n''' Глобальные переменные '''\n# Индикатор редактирования файла\nisEdited = False\n# Имя файла\nfileName = \"\"\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n uic.loadUi(\"mwindow.ui\", self)\n self.setWindowIcon(QIcon(\"pict/python.png\"))\n self.setWindowTitle(\"PyEdit - Новый документ\")\n\n ''' Выход '''\n self.exitAction.triggered.connect(qApp.quit)\n\n ''' О программе '''\n self.aboutAction.triggered.connect(lambda: (QMessageBox.about(self, \"О MainWindow PyQt5\",\n \" MainWindow PyQt5 v 1.0.0 \\n\\n\" +\n \"Автор : \\tmasterarrow\\n\\n\" +\n \"email : \\tmasterarrows@gmail.com\\n\\n\" +\n \" Copyright © 2018\\n\")))\n\n ''' О Qt '''\n self.aboutQtAction.triggered.connect(lambda: (QMessageBox.aboutQt(self, \"О Qt\")));\n\n ''' Содержимое mainEdit изменено '''\n self.mainEdit.textChanged.connect(self.textChanged)\n\n ''' Открыть файл '''\n self.openAction.triggered.connect(self.openDocument)\n\n ''' Сохранить файл '''\n self.saveAction.triggered.connect(self.saveDocument)\n\n ''' Новый файл '''\n self.newFileAction.triggered.connect(self.newDocument)\n\n ''' Закрыть фал '''\n self.closeFileAction.triggered.connect(self.closeDocument)\n\n ''' Печать файла '''\n 
self.printAction.triggered.connect(self.printDocument)\r\n\r\n ''' Status bar visibility '''\r\n self.statusBarAction.triggered.connect(lambda: self.statusBar.setVisible(self.statusBarAction.isChecked()))\r\n\r\n ''' Toolbar visibility '''\r\n self.toolBarAction.triggered.connect(lambda: self.toolBar.setVisible(self.toolBarAction.isChecked()))\r\n ''' __init__() '''\r\n\r\n\r\n def openDocument(self):\r\n '' '''\r\n Open a file\r\n '' '''\r\n\r\n # Global variables\r\n global isEdited, fileName\r\n\r\n if isEdited :\r\n if QMessageBox.question(self, self.windowTitle(), \"Save the file?\", QMessageBox.Yes | QMessageBox.No) == QMessageBox.Yes :\r\n self.saveDocument()\r\n\r\n # File open dialog\r\n filename = QFileDialog.getOpenFileName(self, \"Choose a file\",\r\n QStandardPaths.locate(QStandardPaths.HomeLocation, str()),\r\n \"Text files (*.txt)\")\r\n\r\n # filename -> tuple; filename[0] holds the file name\r\n\r\n # No file selected\r\n # len() - string length\r\n if len(filename[0]) == 0 : return\r\n\r\n fileName = filename[0]\r\n file = QFile(fileName)\r\n\r\n # Open the file for reading\r\n if not file.open(QFile.ReadOnly | QFile.Text):\r\n QMessageBox.critical(self, \"Error\", \"Failed to open the file\", QMessageBox.Ok)\r\n return\r\n\r\n # Read all data from the file through a stream\r\n read = QTextStream(file)\r\n self.mainEdit.setText(read.readAll())\r\n\r\n file.close()\r\n\r\n # Window title\r\n '' '''\r\n fileName[0] [fileName[0].rindex(\"/\") + 1: len(fileName[0])] - extracts the file name from the string (string slice)\r\n fileName[0] [1 : 2] - substring from the character at index 1 up to the character at index 2\r\n fileName[0].rindex(\"/\") - index of the last occurrence of \"/\" in the string\r\n len() - string length\r\n '' '''\r\n self.setWindowTitle(\"PyEdit - \" + fileName [fileName.rindex(\"/\") + 1: len(fileName)])\r\n\r\n # Status bar message\r\n self.statusBar.showMessage(\" Ready... \", 2000)\r\n\r\n # File not edited\r\n isEdited = False\r\n ''' openDocument() '''\r\n\r\n def saveDocument(self):\r\n '' '''\r\n Save the file\r\n '' '''\r\n\r\n global isEdited, fileName\r\n\r\n if not isEdited :\r\n return\r\n\r\n # File save dialog\r\n if len(fileName) == 0 :\r\n filename = QFileDialog.getSaveFileName(self, \"Choose a folder\", QStandardPaths.locate(QStandardPaths.HomeLocation, str()),\r\n \"text files (*.txt)\")\r\n if len(filename[0]) == 0 : return\r\n fileName = filename[0]\r\n\r\n file = QFile(fileName)\r\n\r\n if not file.open(QFile.WriteOnly | QFile.Text):\r\n QMessageBox.critical(self, \"Error\", \"Failed to save the file\", QMessageBox.Ok)\r\n\r\n # Write data to the file through a stream\r\n write = QTextStream(file)\r\n # Write to the stream\r\n write << self.mainEdit.toPlainText()\r\n # Flush to disk\r\n write.flush()\r\n\r\n file.close()\r\n\r\n self.setWindowTitle(\"PyEdit - \" + fileName [fileName.rindex(\"/\") + 1: len(fileName)])\r\n\r\n # Status bar message\r\n self.statusBar.showMessage(\" File saved... 
\", 2000)\n\n # Файл не редактирован\n isEdited = False\n ''' saveDocument() '''\n\n def textChanged(self):\n global isEdited, fileName\n\n if isEdited :\n return\n\n # Текст в mainEdit изменен\n isEdited = True\n\n self.setWindowTitle(self.windowTitle() + \" *\")\n ''' textChanged() '''\n\n def newDocument(self):\n global isEdited, fileName\n\n if isEdited :\n if QMessageBox.question(self, self.windowTitle(), \"Сохранить файл?\", QMessageBox.Yes | QMessageBox.No) == QMessageBox.Yes:\n self.saveDocument()\n\n isEdited = False\n fileName = \"\"\n\n self.mainEdit.clear()\n\n self.setWindowTitle(\"PyEdit - Новый документ\")\n ''' newDocument() '''\n\n def closeDocument(self):\n global isEdited, fileName\n\n isEdited = False\n fileName = \"\"\n\n self.mainEdit.clear()\n\n self.setWindowTitle(\"PyEdit - Новый документ\")\n ''' closeDocument() '''\n\n def printDocument(self):\n # Выбор принтера\n printer = QPrinter()\n printer.setFullPage(True)\n\n # Диалог печати\n printDlg = QPrintPreviewDialog(printer)\n\n printDlg.setWindowTitle(\"Предварительный просмотр\")\n\n if printDlg.exec_() == QDialog.Rejected : return\n\n #self.mainEdit.print(printer)\n\n painter = QPainter(printer)\n\n painter.begin(printer)\n painter.drawText(0, 0, self.mainEdit.toPlainText())\n painter.end()\n\n # Сообщение в строке состояния\n self.statusBar.showMessage(\" Файл отправлен на принтер... \", 2000)\n ''' printDocument() '''\n''' class MainWindow() '''\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MainWindow()\n ex.show()\n sys.exit(app.exec_())", "sub_path": "PyEdit/window.py", "file_name": "window.py", "file_ext": "py", "file_size_in_byte": 8061, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "PyQt5.uic.loadUi", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 24, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QStandardPaths.locate", "line_number": 80, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QStandardPaths", "line_number": 80, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QStandardPaths.HomeLocation", "line_number": 80, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QFile", "line_number": 90, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QFile.ReadOnly", "line_number": 93, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QFile", "line_number": 93, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QFile.Text", "line_number": 93, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QTextStream", "line_number": 98, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QStandardPaths.locate", "line_number": 131, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QStandardPaths", "line_number": 131, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QStandardPaths.HomeLocation", "line_number": 131, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QFile", "line_number": 136, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QFile.WriteOnly", "line_number": 138, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QFile", "line_number": 138, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QFile.Text", "line_number": 138, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QTextStream", "line_number": 142, "usage_type": "call"}, {"api_name": "PyQt5.QtPrintSupport.QPrinter", "line_number": 199, "usage_type": "call"}, {"api_name": "PyQt5.QtPrintSupport.QPrintPreviewDialog", 
"line_number": 203, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPainter", "line_number": 211, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 224, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 227, "usage_type": "call"}]} +{"seq_id": "489310742", "text": "# Load environment variables\nimport argparse\nimport datetime\nfrom bcrypt import hashpw, gensalt\nfrom projects import app\nfrom projects.models import User\n\n# Handling arguments\nparser = argparse.ArgumentParser(description=\"Start the projects website\")\nparser.add_argument(\n \"--debug\",\n dest=\"debug\",\n action=\"store_true\",\n default=False,\n help=\"Whether the application should run in debug mode\",\n)\nparser.add_argument(\n \"--ip\",\n dest=\"ip\",\n type=str,\n default=\"127.0.0.1\",\n help=\"The interface the webserver should listen on\",\n)\nparser.add_argument(\n \"--port\",\n dest=\"port\",\n type=int,\n default=8080,\n help=\"The port the webserver should listen on\",\n)\nargs = parser.parse_args()\n\nif __name__ == \"__main__\":\n # Implement test user\n if args.debug:\n user = User.get_with_first(\"email\", \"test@test.com\")\n if user is None:\n user = User(\n email=\"test@test.com\",\n password=hashpw(bytes(\"test\", \"utf-8\"), gensalt()),\n projects=[],\n is_active=True,\n is_authenticated=True,\n is_anonymous=False,\n confirmed_on=datetime.datetime.now(),\n )\n user.save()\n app.run(host=args.ip, port=args.port, debug=args.debug)\n", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 1302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "projects.models.User.get_with_first", "line_number": 36, "usage_type": "call"}, {"api_name": "projects.models.User", "line_number": 36, "usage_type": "name"}, {"api_name": "projects.models.User", "line_number": 38, "usage_type": "call"}, {"api_name": "bcrypt.hashpw", "line_number": 40, "usage_type": "call"}, {"api_name": "bcrypt.gensalt", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}, {"api_name": "projects.app.run", "line_number": 48, "usage_type": "call"}, {"api_name": "projects.app", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "597161716", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com)\n@description: \n\"\"\"\nfrom setuptools import setup, find_packages\n\n__version__ = \"1.1.3\"\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n readme = f.read()\n\nwith open('requirements.txt', 'r', encoding='utf-8') as f:\n reqs = f.read()\n\nsetup(\n name='pytextclassifier',\n version=__version__,\n description='Text Classifier, Text Classification',\n long_description=readme,\n long_description_content_type='text/markdown',\n author='XuMing',\n author_email='xuming624@qq.com',\n url='https://github.com/shibing624/pytextclassifier',\n license=\"Apache 2.0\",\n classifiers=[\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Natural Language :: Chinese (Simplified)',\n 'Natural Language :: Chinese (Traditional)',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Topic :: Text Processing',\n 'Topic :: Text Processing :: Indexing',\n 'Topic :: Text Processing :: 
Linguistic',\n ],\n keywords='pytextclassifier,textclassifier,classifier,textclassification',\n install_requires=reqs.strip().split('\\n'),\n packages=find_packages(exclude=['tests']),\n package_dir={'pytextclassifier': 'pytextclassifier'},\n package_data={\n 'pytextclassifier': ['*.*', '../LICENSE', '../README.*', '../*.txt', 'data/*',\n 'tools/*', '../examples/thucnews_train_10w.txt'],\n },\n test_suite='tests',\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1581, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "setuptools.setup", "line_number": 16, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "268940394", "text": "import requests\nfrom bs4 import BeautifulSoup\n\n\ndef main():\n links = [] # list of links\n url = 'https://dungeon.su/bestiary/' # URL of the main page\n\n # fetch the main page\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n\n # find all the required li elements on the main page\n ul = soup.find('ul', {'class': 'list-of-items col4 double'})\n a = ul.select('a[href^=\"/bestiary/\"]')\n\n # fill the list of links\n for i in a:\n links.append(i['href'])\n\n # parse each link from the list of links\n for link in links:\n link_parser(link)\n\n\n# parser for the individual monster pages\ndef link_parser(link):\n name = ''\n abilities = ''\n actions = ''\n description = ''\n\n link = 'https://dungeon.su' + link\n r = requests.get(link)\n soup = BeautifulSoup(r.text, 'html.parser')\n name = soup.find('a', {'class': 'item-link'}).get_text()\n ul = soup.find('ul', {'class': 'params'})\n for li in ul.find_all('li', {'class': 'subsection'}):\n print(li.get_text())\n\nmain()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1253, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "289341402", "text": "import os\nfrom glob import glob\nimport traceback\nimport logging\nimport cache_video\n\n\ndef main(in_dir, out_dir=None, indices=None, detection_model_path='weights/WIDERFace_DSFD_RES152.pth', batch_size=8,\n out_postfix='_dsfd.pkl'):\n out_dir = in_dir if out_dir is None else out_dir\n vid_paths = sorted(glob(os.path.join(in_dir, '*.mp4')))\n vid_paths = eval('vid_paths[%s]' % indices) if indices is not None else vid_paths\n\n # For each video file\n for i, vid_path in enumerate(vid_paths):\n vid_name = os.path.splitext(os.path.basename(vid_path))[0]\n curr_cache_path = os.path.join(out_dir, vid_name + out_postfix)\n\n if os.path.exists(curr_cache_path):\n print('[%d/%d] Skipping \"%s\"' % (i + 1, len(vid_paths), vid_name))\n continue\n else:\n print('[%d/%d] Processing \"%s\"...' 
% (i + 1, len(vid_paths), vid_name))\n\n # Process video\n try:\n cache_video.main(vid_path, curr_cache_path, detection_model_path, batch_size)\n except Exception as e:\n logging.error(traceback.format_exc())\n\n\nif __name__ == \"__main__\":\n # Parse program arguments\n import argparse\n parser = argparse.ArgumentParser('cache_video_batch')\n parser.add_argument('input', metavar='DIR',\n help='path to input directory')\n parser.add_argument('-o', '--output', default=None, metavar='PATH',\n help='output directory')\n parser.add_argument('-i', '--indices', default=None,\n help='python style indices (e.g. 0:10)')\n parser.add_argument('-dm', '--detection_model', metavar='PATH', default='weights/WIDERFace_DSFD_RES152.pth',\n help='path to face detection model')\n parser.add_argument('-b', '--batch-size', default=8, type=int, metavar='N',\n help='batch size (default: 8)')\n parser.add_argument('-op', '--out_postfix', default='_dsfd.pkl', metavar='POSTFIX',\n help='output file postfix')\n args = parser.parse_args()\n main(args.input, args.output, args.indices, args.detection_model, args.batch_size, args.out_postfix)\n", "sub_path": "face_detection_dsfd/cache_video_batch.py", "file_name": "cache_video_batch.py", "file_ext": "py", "file_size_in_byte": 2164, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "glob.glob", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cache_video.main", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 29, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 29, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "45107043", "text": "# Class to hold a single set of observations from the image coprocessor\r\n# One set of observations is derived from processing a single frame in the\r\n# image stream into one or more targets, along with metadata about the\r\n# processing time.\r\n\r\nimport json\r\n\r\nclass TargetObservation(object):\r\n def __init__(self):\r\n self.clear()\r\n\r\n def clear(self):\r\n self.frameCounter = 0 # Indicates which frame this was - increments for each new processed frame\r\n # Target data. All arrays will be the same length. 
Each index in the arrays\r\n # represents a unique qualified target identified in the frame.\r\n self.Xs = [] # X coordinates of the centroids\r\n self.Ys = [] # Y coordinates of the centroids\r\n self.boundedAreas = [] # 2D areas enclosed by the found contours\r\n self.widths = [] # Total widths of the observed targets\r\n self.heights = [] # Total heights of the observed targets\r\n # Metadata used to track performance, or reduce phase delay effects in control loops\r\n self.procTime = 0 # Time from frame reception over IP to transmit, in milliseconds.\r\n self.cpuLoad = 0 # Percentage of cpu time spent doing things other than nothing\r\n self.memLoad = 0 # Percent of the available RAM bytes which are in use by some process\r\n self.fps = 0 # Number of frames per second that are received, processed, and transmitted.\r\n\r\n def addTarget(self, X_in, Y_in, area_in, width_in, height_in):\r\n self.Xs.append(X_in)\r\n self.Ys.append(Y_in)\r\n self.boundedAreas.append(area_in)\r\n self.widths.append(width_in)\r\n self.heights.append(height_in)\r\n\r\n\r\n def setMetadata(self, frameCounter_in, procTime_ms_in, cpuLoad_pct_in, memLoad_pct_in, fps_in):\r\n self.frameCounter = frameCounter_in\r\n self.procTime = procTime_ms_in\r\n self.cpuLoad = cpuLoad_pct_in\r\n self.memLoad = memLoad_pct_in\r\n self.fps = fps_in\r\n\r\n def toJsonString(self):\r\n # HEY YOU DEVELOPER LOOK HERE!!!!\r\n # The format used to create this json string must align with the\r\n # format expected by the roboRIO in the java code. If you alter this\r\n # code, you _must_ go look at the java code to ensure they remain aligned.\r\n infoDict = {\"frameCounter\":self.frameCounter,\r\n \"Xs\":self.Xs,\r\n \"Ys\":self.Ys,\r\n \"boundedAreas\":self.boundedAreas,\r\n \"widths\":self.widths,\r\n \"heights\":self.heights,\r\n \"procTime\":self.procTime,\r\n \"cpuLoad\":self.cpuLoad,\r\n \"memLoad\":self.memLoad,\r\n \"fps\":self.fps}\r\n\r\n return json.dumps(infoDict)\r\n\r\n\r\n\r\n\r\n\r\n# No main code to be had....\r\n", "sub_path": "RobotCasserole2017-master/BBB/TargetObservation.py", "file_name": "TargetObservation.py", "file_ext": "py", "file_size_in_byte": 2792, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "json.dumps", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "240017678", "text": "from datetime import datetime\nfrom enum import Enum\n\nimport pandas as pd\nfrom typing import Optional, List, Dict\nfrom pydantic import BaseModel\n\nSeries = pd.core.series.Series\n\n# TODO\n# - find all possible roles\n# - make a globally available list of possible roles\n\n# Request related models\nclass Birth(BaseModel):\n start: datetime\n end: datetime\n\nclass Sex(str, Enum):\n M = \"M\"\n F = \"F\"\n\nclass Role(str, Enum):\n regular = \"regular\"\n chair = \"chair\"\n guest = \"guest\"\n\nclass Static(BaseModel):\n MoPs: Optional[List[str]]\n birth: Optional[Birth]\n sex: Optional[Sex]\n role: Optional[Role]\n\nclass Age(BaseModel):\n start: int\n end: int\n\nclass Dynamic(BaseModel):\n age: Optional[Age]\n group: Optional[str]\n party: Optional[str]\n\nclass Speakers(BaseModel):\n static: Optional[Static]\n dynamic: Optional[Dynamic]\n\nclass Interval(BaseModel):\n start: datetime\n end: datetime\n\nclass Data(BaseModel):\n interval: Optional[Interval]\n term: Optional[str]\n meeting: Optional[int]\n sitting: Optional[int]\n agenda: Optional[int]\n\nclass Request(BaseModel):\n speakers: Optional[Speakers]\n data: Data\n\n# Response related 
models\nclass Response(BaseModel):\n speaking_time: Dict[str, List[List]]\n relative_diff: Dict[str, List[List]]\n unanchored: Dict[str, List[List]]\n wpm: Dict[str, List[List]]\n\n\ndef update_response(response: Response, row: Series) -> None:\n update_speaking_time(response, row)\n update_relative_diff(response, row)\n update_unanchored(response, row)\n update_wpm(response, row)\n\ndef update_speaking_time(response: Response, row: Series) -> None:\n role = row[\"role\"]\n if role not in response[\"speaking_time\"]:\n response[\"speaking_time\"][role] = [[\n \"Speaker\",\n \"Words\",\n \"Sentences\",\n \"Paragraphs\",\n \"Utterances\"\n ]]\n\n response[\"speaking_time\"][role].append([\n row[\"speaker\"],\n row[\"length_word\"],\n row[\"length_sentence\"],\n row[\"length_paragraph\"],\n row[\"length_utterance\"]\n ])\n\ndef update_relative_diff(response: Response, row: Series) -> None:\n role = row[\"role\"]\n if role not in response[\"relative_diff\"]:\n response[\"relative_diff\"][role] = [[\n \"Speaker\",\n \"Utterance vs paragraph\",\n \"Paragraph vs sentence\",\n \"Sentence vs word\"\n ]]\n\n response[\"relative_diff\"][role].append([\n row[\"speaker\"],\n row[\"utterance-paragraph\"],\n row[\"paragraph-sentence\"],\n row[\"sentence-word\"]\n ])\n\ndef update_unanchored(response: Response, row: Series) -> None:\n role = row[\"role\"]\n if role not in response[\"unanchored\"]:\n response[\"unanchored\"][role] = [[\"Speaker\", \"Unanchored\"]]\n\n response[\"unanchored\"][role].append([\n row[\"speaker\"],\n row[\"unanchored\"]\n ])\n\ndef update_wpm(response: Response, row: Series) -> None:\n role = row[\"role\"]\n if role not in response[\"wpm\"]:\n response[\"wpm\"][role] = [[\"Speaker\", \"Words per minute\"]]\n\n response[\"wpm\"][role].append([\n row[\"speaker\"],\n row[\"words_per_minute\"]\n ])\n\n", "sub_path": "src/server/multiple/interval.py", "file_name": "interval.py", "file_ext": "py", "file_size_in_byte": 3176, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pandas.core", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pydantic.BaseModel", "line_number": 15, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 19, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 23, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 32, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 34, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 41, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 45, "usage_type": 
"name"}, {"api_name": "pydantic.BaseModel", "line_number": 47, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 56, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 59, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 63, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 66, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 66, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "377666675", "text": "from bs4 import BeautifulSoup, SoupStrainer, NavigableString, Tag\nfrom sqlalchemy import create_engine\nimport time\nimport csv\nimport requests\nimport re\nimport os\nimport numpy as np\nimport urllib.request\nimport urllib\nimport html5lib\nimport pandas as pd\nimport nltk\nimport datetime\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters\nfrom pandas import DataFrame\nimport dropbox\ndropbox_access_token = \"-EBxOhuzOI4AAAAAAAAAAU1JAwmpo6Xy9PsYOguc1-mFh5QJsWAvENXusH06oWXr\"\nclient = dropbox.Dropbox(dropbox_access_token)\ndb_connect = create_engine(\n 'postgresql://awjzgmwqiatzjg:e4424ae3d375e2057bcc9cde832672940d44ea2c05260e28ccb04dc1575ec52d@ec2-34-204-22-76.compute-1.amazonaws.com:5432/dabbhqt4pegslv')\nconn = db_connect.connect()\ntimez = datetime.datetime.today().strftime(\"%Y/%m/%d\")\ntime = datetime.datetime.today().strftime(\"%Y-%m-%d\")\n# timez = '2021/09/24'\n# time = '2021-09-24'\nsource_type = \"news\"\n\n\ndef gambuuze_scrapper():\n general_name = '_gambuuze.csv'\n filename = time+general_name\n dropbox_path = \"/Coast_data/\"+filename\n datasource = \"Gambuuze news\"\n scraped_data = []\n today_articles = []\n heads_lines = []\n url = url = \"https://gambuuze.ug/\"+timez+\"/\"\n r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})\n soup = BeautifulSoup(r.content)\n snippet = soup.find_all('h3', attrs={\"class\": \"jeg_post_title\"})\n for i in snippet:\n for link in i.find_all('a'):\n line = link.get('href')\n today_articles.append(line)\n today_articles = list(set(today_articles))\n for x in today_articles:\n if str(timez) in x:\n r = requests.get(x, headers={'User-Agent': 'Mozilla/5.0'})\n soup = BeautifulSoup(r.content)\n for division in soup.find_all(\"div\", attrs={\"class\": \"content-inner\"}):\n for link in division.find_all('p'):\n f = link.get_text()\n sentences = nltk.sent_tokenize(f)\n for sentence in sentences:\n scraped_data.append(sentence)\n snippet = soup.find_all(\n 'div', attrs={\"class\": \"jeg_inner_content\"})\n for i in snippet:\n for 
link in i.find_all('h1'):\n fl = link.get_text()\n hsentences = nltk.sent_tokenize(fl)\n for hsentence in hsentences:\n heads_lines.append(hsentence)\n scraped_data.extend(heads_lines)\n df = pd.DataFrame(scraped_data, columns=['Luganda'])\n df.drop_duplicates(subset='Luganda', keep='first', inplace=True)\n df['Luganda'].replace(' ', np.nan, inplace=True)\n corpus_length = len(df)\n df = df.dropna(subset=['Luganda'])\n if corpus_length >= 2:\n df.to_csv(filename, index=False)\n client.files_upload(open(filename, \"rb\").read(), dropbox_path)\n query = conn.execute(\n \"insert into socialmedia(datasource,corpus,source_type) values('{0}','{1}','{2}')\".format(datasource, corpus_length, source_type))\n else:\n query = conn.execute(\n \"insert into socialmedia(datasource,corpus,source_type) values('{0}','{1}','{2}')\".format(datasource, corpus_length, source_type))\n\n\ndef ssegwanga_scrapper():\n general_name = '_ssegwanga.csv'\n filename = time+general_name\n datasource = \"Ssegwanga news\"\n dropbox_path = \"/Coast_data/\"+filename\n link_urls = []\n corpus = []\n subhd = []\n hlines = []\n url = \"https://sseggwanga.com/index.php/\"+timez+\"/\"\n r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})\n soup = BeautifulSoup(r.content)\n snippet = soup.find_all('div', attrs={\"class\": \"td-ss-main-content\"})\n for i in snippet:\n for link in i.find_all('a'):\n line = link.get('href')\n link_urls.append(line)\n today_articles = list(set(link_urls))\n for x in today_articles:\n r = requests.get(x, headers={'User-Agent': 'Mozilla/5.0'})\n soup = BeautifulSoup(r.content)\n for division in soup.find_all(\"div\", attrs={\"class\": \"td-post-content\"}):\n for link in division.find_all('p'):\n f = link.get_text()\n sentences = nltk.sent_tokenize(f)\n for sentence in sentences:\n corpus.append(sentence)\n for header in soup.find_all(\"header\", attrs={\"class\": \"td-post-title\"}):\n for link in header.find_all('h1'):\n fh = link.get_text()\n hsentences = nltk.sent_tokenize(fh)\n for hsentence in hsentences:\n hlines.append(hsentence)\n for subhead in header.find_all('p'):\n fb = subhead.get_text()\n bsentences = nltk.sent_tokenize(fb)\n for bsentence in bsentences:\n subhd.append(bsentence)\n hlines.extend(subhd)\n corpus.extend(hlines)\n df = pd.DataFrame(corpus, columns=['Luganda'])\n df.drop_duplicates(subset='Luganda', keep='first', inplace=True)\n df['Luganda'].replace(' ', np.nan, inplace=True)\n df = df.dropna(subset=['Luganda'])\n corpus_length = len(df)\n if corpus_length >= 2:\n df.to_csv(filename, index=False)\n client.files_upload(open(filename, \"rb\").read(), dropbox_path)\n query = conn.execute(\n \"insert into socialmedia(datasource,corpus,source_type) values('{0}','{1}','{2}')\".format(datasource, corpus_length, source_type))\n else:\n query = conn.execute(\n \"insert into socialmedia(datasource,corpus,source_type) values('{0}','{1}','{2}')\".format(datasource, corpus_length, source_type))\n\n\ndef dembe_scrapper():\n general_name = '_dembe.csv'\n filename = time+general_name\n dropbox_path = \"/Coast_data/\"+filename\n datasource = \"Dembe FM\"\n link_urls = []\n corpus_data = []\n sentx = []\n headz = []\n for page in range(1, 2):\n url = \"https://www.dembefm.ug/category/amawulire/page/{}\".format(page)\n r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})\n soup = BeautifulSoup(r.content)\n for division in soup.find_all(\"div\", attrs={\"class\": \"blog-archive\"}):\n for link in division.find_all('h2', attrs={\"class\": \"blog-arc-heading\"}):\n for rel 
in link.find_all('a'):\n pager = rel.get('href')\n r = requests.get(\n pager, headers={'User-Agent': 'Mozilla/5.0'})\n soup = BeautifulSoup(r.content)\n for division in soup.find_all(\"div\", attrs={\"class\": \"single-col\"}):\n for dayt in division.find_all(\"p\", attrs={\"class\": \"blog-date\"}):\n f = dayt.get_text()\n data = f.replace(\"st\", \"\")\n data = data.replace(\"th\", \"\")\n data = data.replace(\"rd\", \"\")\n data = data.replace(\"nd\", \"\")\n dat = datetime.datetime.strptime(\n data, '%B %d, %Y').strftime('%Y/%m/%d')\n if dat == timez:\n for link in division.find_all('h2'):\n f = link.get_text()\n sentences = nltk.sent_tokenize(f)\n for sentence in sentences:\n corpus_data.append(sentence)\n for link in division.find_all('p'):\n fp = link.get_text()\n psentences = nltk.sent_tokenize(fp)\n for psentence in psentences:\n sentx.append(psentence)\n corpus_data.extend(sentx)\n df = pd.DataFrame(corpus_data, columns=['Luganda'])\n df.drop_duplicates(subset='Luganda', keep='first', inplace=True)\n df['Luganda'].replace(' ', np.nan, inplace=True)\n df = df.dropna(subset=['Luganda'])\n df = df[~df['Luganda'].str.split().str.len().lt(5)]\n corpus_length = len(df)\n if corpus_length >= 2:\n df.to_csv(filename, index=False)\n client.files_upload(open(filename, \"rb\").read(), dropbox_path)\n query = conn.execute(\n \"insert into socialmedia(datasource,corpus,source_type) values('{0}','{1}','{2}')\".format(datasource, corpus_length, source_type))\n else:\n query = conn.execute(\n \"insert into socialmedia(datasource,corpus,source_type) values('{0}','{1}','{2}')\".format(datasource, corpus_length, source_type))\n\n\ndef galaxyradio_scrapper():\n general_name = '_galaxyfm.csv'\n filename = time+general_name\n datasource = \"Galaxy FM\"\n dropbox_path = \"/Coast_data/\"+filename\n corpus_data = []\n headz = []\n for page in range(1, 6):\n url = \"https://www.galaxyfm.co.ug/luganda/page/{}\".format(page)\n r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})\n soup = BeautifulSoup(r.content)\n for division in soup.find_all(\"div\", attrs={\"class\": \"news-feed section group\"}):\n for link in division.find_all('div', attrs={\"class\": \"news-item col span_1_of_2\"}):\n for rel in link.find_all('a'):\n pager = rel.get('href')\n if timez in pager:\n r = requests.get(pager)\n soup = BeautifulSoup(r.content)\n for hdivision in soup.find_all(\"div\", attrs={\"class\": \"container body-block clear_fix mg-t-20 mg-b-10\"}):\n for hdlines in hdivision.find_all(\"h1\", attrs={\"class\": \"post-title\"}):\n hdline = hdlines.get_text()\n headz.append(hdline)\n for day_division in soup.find_all(\"div\", attrs={\"class\": \"post-content with-videos\"}):\n for mboz in day_division.find_all('p'):\n mf = mboz.get_text()\n msentences = nltk.sent_tokenize(mf)\n for msentence in msentences:\n corpus_data.append(msentence)\n corpus_data.extend(headz)\n df = pd.DataFrame(corpus_data, columns=['Luganda'])\n df.drop_duplicates(subset='Luganda', keep='first', inplace=True)\n df['Luganda'].replace(' ', np.nan, inplace=True)\n df = df.dropna(subset=['Luganda'])\n df = df[~df['Luganda'].str.split().str.len().lt(5)]\n corpus_length = len(df)\n if corpus_length >= 2:\n df.to_csv(filename, index=False)\n client.files_upload(open(filename, \"rb\").read(), dropbox_path)\n query = conn.execute(\n \"insert into socialmedia(datasource,corpus,source_type) values('{0}','{1}','{2}')\".format(datasource, corpus_length, source_type))\n else:\n query = conn.execute(\n \"insert into 
socialmedia(datasource,corpus,source_type) values('{0}','{1}','{2}')\".format(datasource, corpus_length, source_type))\n\n\ndef simba_scrapper():\n general_name = '_simba.csv'\n filename = time+general_name\n datasource = \"Radio Simba\"\n dropbox_path = \"/Coast_data/\"+filename\n link_urls = []\n corpus_data = []\n sentx = []\n headz = []\n for page in range(1, 6):\n url = \"https://www.radiosimba.ug/latest-news/page/{}\".format(page)\n r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})\n soup = BeautifulSoup(r.content)\n for division in soup.find_all(\"div\", attrs={\"class\": \"mvp-main-body-blog left relative\"}):\n for link in division.find_all('ul'):\n for paarg in link.find_all('li'):\n for post_time in paarg.find_all('span', attrs={\"class\": \"mvp-post-info-date left relative\"}):\n ptime = post_time.get_text()\n duration = \"hours\"\n if duration in ptime:\n ptime = ptime.replace('/', '')\n ptime = ptime.replace('hours', '')\n ptime = ptime.replace('ago', '')\n if int(ptime) <= 15:\n for xlink in paarg.find_all('a'):\n pager = xlink.get('href')\n r = requests.get(\n pager, headers={'User-Agent': 'Mozilla/5.0'})\n soup = BeautifulSoup(r.content)\n for division in soup.find_all(\"div\", attrs={\"class\": \"theiaPostSlider_preloadedSlide\"}):\n for link in division.find_all('p'):\n f = link.get_text()\n sentences = nltk.sent_tokenize(f)\n for sentence in sentences:\n corpus_data.append(sentence)\n for division in soup.find_all(\"div\", attrs={\"class\": \"_1mf _1mj\"}):\n for link in division.find_all('span'):\n f = link.get_text()\n sentences = nltk.sent_tokenize(f)\n for sentence in sentences:\n sentx.append(sentence)\n for division in soup.find_all(\"div\", attrs={\"class\": \"left relative\"}):\n for link in division.find_all('h1'):\n f = link.get_text()\n sentences = nltk.sent_tokenize(f)\n for sentence in sentences:\n headz.append(sentence)\n corpus_data.extend(sentx)\n headz.extend(corpus_data)\n major = list(set(headz))\n df = pd.DataFrame(major, columns=['Luganda'])\n df.drop_duplicates(subset='Luganda', keep='first', inplace=True)\n df['Luganda'].replace(' ', np.nan, inplace=True)\n df = df.dropna(subset=['Luganda'])\n # df = df[~df['Luganda'].str.split().str.len().lt(5)]\n corpus_length = len(df)\n if corpus_length >= 2:\n df.to_csv(filename, index=False)\n client.files_upload(open(filename, \"rb\").read(), dropbox_path)\n query = conn.execute(\n \"insert into socialmedia(datasource,corpus,source_type) values('{0}','{1}','{2}')\".format(datasource, corpus_length, source_type))\n else:\n query = conn.execute(\n \"insert into socialmedia(datasource,corpus,source_type) values('{0}','{1}','{2}')\".format(datasource, corpus_length, source_type))\n\n\nsimba_scrapper()\ngalaxyradio_scrapper()\ndembe_scrapper()\nssegwanga_scrapper()\ngambuuze_scrapper()\n", "sub_path": "scrapper/newscraper.py", "file_name": "newscraper.py", "file_ext": "py", "file_size_in_byte": 14965, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "dropbox.Dropbox", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "attribute"}, {"api_name": 
"requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 40, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 50, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 54, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 68, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 91, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 92, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 100, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 101, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 105, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 111, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 123, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 147, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 148, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 153, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 155, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 163, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 163, "usage_type": "attribute"}, {"api_name": "nltk.sent_tokenize", "line_number": 168, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 173, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 179, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 202, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 203, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 209, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 210, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 218, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 224, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 249, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 250, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 264, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 266, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 270, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 276, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 282, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 290, "usage_type": "attribute"}]} +{"seq_id": "525760833", "text": "import os\nimport json\nimport sqlite3\nfrom sqlite3 import Error\n\ndef get_database():\n return \"game_state.db\"\n\n\ndef create_connetion(db_file):\n \"\"\" create a database connection to a SQLite database \"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n 
return conn\n except Error as e:\n print(e)\n \n return conn\n\n\ndef create_table(conn, create_table_sql):\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n conn.commit()\n except Error as e:\n print(e)\n\n\ndef build_game_state_table():\n database_table_str = \"\"\"\n CREATE TABLE IF NOT EXISTS game_state_table (\n\tgame_code text PRIMARY KEY,\n\tgame_state text NOT NULL,\n\tactive integer NOT NULL\n);\n \"\"\"\n conn = create_connetion(db_file=get_database())\n if conn is not None:\n create_table(conn, database_table_str)\n return conn\n\n\ndef insert_game_state_in_db(game_code : str, game_state: dict):\n sql_insert = \"\"\"INSERT INTO game_state_table (game_code,game_state,active)\n VALUES(?,?,?);\"\"\"\n values = (game_code, json.dumps(game_state), True)\n with create_connetion(db_file=get_database()) as conn:\n c = conn.cursor()\n c.execute(sql_insert, values)\n conn.commit()\n\n\ndef update_game_state_in_db(game_code: str, game_state: dict, activate: bool):\n sql_update = \"\"\"UPDATE game_state_table\n SET game_state = ?,\n active = ? \n WHERE game_code = ?\"\"\"\n values = (json.dumps(game_state), activate, game_code)\n with create_connetion(db_file=get_database()) as conn:\n c = conn.cursor()\n c.execute(sql_update, values)\n conn.commit()\n\n\ndef get_game_state_in_db(game_code: str):\n sql_select = \"\"\"SELECT game_state FROM game_state_table WHERE game_code=? and active=?\"\"\"\n values = (game_code, True)\n output = {}\n with create_connetion(db_file=get_database()) as conn:\n c = conn.cursor()\n c.execute(sql_select, values)\n output = c.fetchone()\n conn.commit()\n output = json.loads(output[0])\n return output\n\n\ndef upsert_game_state_in_db(game_code: str, game_state: dict, activate: bool):\n sql_upsert = \"\"\"\n INSERT INTO game_state_table (game_code,game_state,active)\n VALUES(?,?,?) 
ON CONFLICT(game_code) DO UPDATE\n SET game_state=?,\n active=?;\"\"\"\n print(game_state)\n game_state_string = json.dumps(game_state)\n values = (game_code, game_state_string, activate, game_state_string, activate)\n output = False\n with create_connetion(db_file=get_database()) as conn:\n c = conn.cursor()\n c.execute(sql_upsert, values)\n conn.commit()\n output = True\n return output\n\n", "sub_path": "backend_code/Database/database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 2734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sqlite3.connect", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 16, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 27, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 60, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 76, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "486422062", "text": "# Copyright (c) 2007, Enthought, Inc.\n# License: BSD Style.\n\n#-- Imports --------------------------------------------------------------------\n\nfrom numpy.random \\\n import random\n\nfrom traits.api \\\n import HasTraits, Array, Int, Property\n\nfrom traitsui.api \\\n import View, Item\n\nfrom traitsui.ui_editors.array_view_editor \\\n import ArrayViewEditor\n\n#-- ShowArray demo class -------------------------------------------------------\n\nclass ShowArray ( HasTraits ):\n\n row = Int( 5, enter_set = True, auto_set = False )\n\n data = Property( Array, depends_on = 'row' )\n def _get_data( self ):\n return random( ( self.row, 3 ) )\n\n view = View( 'row',\n Item( \n 'data',\n show_label = False,\n editor = ArrayViewEditor( titles = [ 'x', 'y', 'z' ],\n format = '%.4f',\n font = 'Arial 8' )\n ),\n title = 'Array Viewer',\n width = 0.3,\n height = 0.8,\n resizable = True\n )\n\n#-- Run the demo ---------------------------------------------------------------\n\n# Create the demo:\ndemo = ShowArray()\n\n# Run the demo (if invoked from the command line):\nif __name__ == '__main__':\n demo.configure_traits()\n", "sub_path": "scratch/enthought_tests/array_editor.py", "file_name": "array_editor.py", "file_ext": "py", "file_size_in_byte": 1279, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "traits.api.HasTraits", "line_number": 18, "usage_type": "name"}, {"api_name": "traits.api.Int", "line_number": 20, "usage_type": "call"}, {"api_name": "traits.api.Property", "line_number": 22, "usage_type": "call"}, {"api_name": "traits.api.Array", "line_number": 22, "usage_type": "argument"}, {"api_name": "numpy.random.random", "line_number": 24, "usage_type": "call"}, {"api_name": "traitsui.api.View", "line_number": 26, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 27, "usage_type": "call"}, {"api_name": "traitsui.ui_editors.array_view_editor.ArrayViewEditor", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "438007717", "text": "from __future__ import division # Python 2 users only\nimport nltk, re, pprint\nfrom nltk import word_tokenize\n\n\nclass NLPParser():\n\n\tdef __init__(self):\n\t\tprint(\"Initialising NLP-Parser Module\")\n\t\tself.storylist = ['frozen','inside out','cinderella']\n\n\t#tokenize queries \n\tdef tokenize(self,query):\n\t\ttokens = 
word_tokenize(query.lower())\n\t\tprint(\"Tokenizing \" + str(len(tokens)))\n\t\treturn tokens\n\n\t#Parses the message to determine if there is a film/story title\n\t#Returns all the matched stories\n\tdef StoryParse(self,query):\n\t\ttokens = self.tokenize(query)\n\t\tmatches = []\n\t\tprint(\"Story parsing\")\n\t\tfor token in tokens:\n\t\t\tif token in self.storylist:\n\t\t\t\tprint(\"in story list!\")\n\t\t\t\tmatches.append(token)\n\t\treturn matches\n\n\n\n\n\n\n", "sub_path": "parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 728, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "nltk.word_tokenize", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "338866881", "text": "# -*- coding: utf-8 -*-\n# @Time :2019/3/6 15:03\n# @Author :zhuhejun\n\n\nfrom PIL import Image, ImageDraw, ImageFont\nimport cv2\nimport numpy as np\nimport random\n\ndef rotate_bound(img, angle):\n h, w = img.shape[:2]\n cx, cy = w // 2, h // 2\n mat = cv2.getRotationMatrix2D((cx, cy), -angle, 1.0)\n cos = np.abs(mat[0, 0])\n sin = np.abs(mat[0, 1])\n nw = int((h * sin) + (w * cos))\n nh = int((h * cos) + (w * sin))\n mat[0, 2] += (nw / 2) - cx\n mat[1, 2] += (nh / 2) - cy\n\n return cv2.warpAffine(img, mat, (nw, nh), borderMode=cv2.BORDER_REPLICATE)\n\ndef gen_text(font_path, font_color, font_size, text, loc_status=1):\n font = ImageFont.truetype(font_path, font_size)\n\n height = int(font_size * 3)\n width = int(len(text) * height * 1.5)\n # width = int(font_size * 3)\n # height = int(len(text) * width * 1.5)\n if loc_status:\n ori_text_image = Image.new(\"RGBA\", (width, height), (255, 255, 255, 0)) # 255 = opaque, 0 = fully transparent\n draw_handle = ImageDraw.Draw(ori_text_image, \"RGBA\")\n pos_list = []\n angle = np.random.random() * 2 - 1\n for char_idx, char in enumerate(text):\n # print(char )\n if char == ' ':\n continue\n tmp_text = text[:char_idx + 1]\n\n # draw_handle.text((3, 3), tmp_text, fill=font_color, font=font)\n draw_handle.text((3, 3), tmp_text, fill=font_color, font=font)\n text_image = cv2.cvtColor(np.asarray(ori_text_image), cv2.COLOR_RGBA2BGRA)\n text_image = rotate_bound(text_image, angle)\n deta_img = text_image[:, :, 3].copy()\n if len(pos_list):\n deta_img[:, :pos_list[-1][-1] + 2] = 0\n # deta_img[:pos_list[-1][-3] + 2, :] = 0\n pos = np.where(deta_img[:, :] > 100)\n # print('232',pos)\n # print('qqq',pos[0])\n # print('www',pos[1])\n pos_list.append([min(pos[0]), max(pos[0]), min(pos[1]), max(pos[1])])\n # print('qqq',pos_list)\n\n ori_text_image = Image.new(\"RGBA\", (width, height), (255, 255, 255, 0)) # 255 = opaque, 0 = fully transparent\n\n draw_handle = ImageDraw.Draw(ori_text_image, \"RGBA\")\n # draw_handle.text((3, 3), text, fill=font_color, font=font)\n draw_handle.text((3, 3), text, fill=font_color, font=font)\n text_image = cv2.cvtColor(np.asarray(ori_text_image), cv2.COLOR_RGBA2BGRA)\n text_image = rotate_bound(text_image, angle)\n\n point_list = [[(x1 + x2) // 2, (y1 + y2) // 2] for y1, y2, x1, x2 in pos_list]\n deta_img = text_image[:, :, 3].copy()\n pos = np.where(deta_img[:, :] > 100)\n char_size = max(pos[0]) - min(pos[0])\n # char_size = max(pos[1]) - min(pos[1])\n # print(point_list)\n return text_image, char_size, point_list\n else:\n angle = np.random.random() * 2 - 1\n ori_text_image = Image.new(\"RGBA\", (width, height), (255, 255, 255, 0)) # 255 = opaque, 0 = fully transparent\n\n draw_handle = ImageDraw.Draw(ori_text_image, \"RGBA\")\n\n draw_handle.text((3, 3), text, fill=font_color, font=font)\n\n # draw_handle.text((3, 
3), '\\n'.join(text), fill=font_color, font=font)\n text_image = cv2.cvtColor(np.asarray(ori_text_image), cv2.COLOR_RGBA2BGRA)\n text_image = rotate_bound(text_image, angle)\n\n # randomly make the characters wider\n if np.random.random()>0.7:\n h,w = text_image.shape[:2]\n temp_size = random.uniform(1.1,2.1)\n size = (int(w*temp_size), h)\n text_image = cv2.resize(text_image,size)\n\n deta_img = text_image[:, :, 3].copy()\n pos = np.where(deta_img[:, :] > 100)\n # char_size = max(pos[1]) - min(pos[1])\n # character height\n char_size = max(pos[0])-min(pos[0])\n return text_image, char_size, None\n\n\nif __name__ == '__main__':\n pass\n", "sub_path": "util/gen_text_util.py", "file_name": "gen_text_util.py", "file_ext": "py", "file_size_in_byte": 3848, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "cv2.getRotationMatrix2D", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.BORDER_REPLICATE", "line_number": 22, "usage_type": "attribute"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 25, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 32, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGBA2BGRA", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 50, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 57, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 59, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 59, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGBA2BGRA", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 73, "usage_type": "attribute"}, {"api_name": "PIL.Image.new", "line_number": 74, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 74, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 76, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGBA2BGRA", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 85, "usage_type": "attribute"}, {"api_name": "random.uniform", "line_number": 87, 
"usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "519148170", "text": "import os\nimport logging\nimport requests\nfrom pprint import pprint\nfrom datetime import datetime\n\n#configuring the logger - tell it what file to write to, \n#what level of logging to record(DEBUG means everything), and\n#format of string that is recorded for each log eveng\nlogging.basicConfig(filename='debug.log', level=logging.DEBUG, format=f'%(asctime)s - %(levelname)s - %(message)s')\n\n\nkey = os.environ.get('WEATHER_KEY')\nurl = 'http://api.openweathermap.org/data/2.5/forecast'\n\ndef main():\n location = get_location()\n forecast_data, error= get_forecast(location,key)\n if error:\n print('Sorry, could not get the forecast')\n else:\n show_forecast(forecast_data)\n \n\n\ndef get_location():\n city,country= '',''\n while len(city)==0:\n city = input('Enter the name of the city: ').strip().title() #remove white spaces and make title case\n while len(country) !=2 or not country.isalpha():\n country = input('Enter the 2-letter country code: ').strip().upper()\n location = f'{city},{country}'\n logging.info(f'User has entered valid location \"{location}\"')\n return location\n\n\ndef get_forecast(location,key):\n try:\n query= {'q': location, 'units': 'imperial', 'appid': key}\n response = requests.get(url, params=query)\n response.raise_for_status() #will raise an exception for 400(client) or 500(server) errors\n forecast_data = response.json() #convert the response to json \n logging.debug(f'response received from API and converted to JSON')\n return forecast_data, None\n \n except Exception as ex:\n logging.exception(f'Error requesting URL {url}')\n logging.info(ex)\n logging.info(response.text)\n return None,ex\n \ndef show_forecast(forecast_data):\n try:\n list_of_forecasts = forecast_data['list']\n for forecast in list_of_forecasts: \n temp = forecast['main']['temp']\n timestamp = forecast['dt']\n forecast_date = datetime.fromtimestamp(timestamp)\n weather_description = forecast['weather'][0]['description']\n wind_speed = forecast['wind']['speed']\n print(f'At {forecast_date:%m-%d-%Y %H:%M}, the temperature will be {temp:.1f}F, the windspeed will be {wind_speed:.0f} mph, and the forecast is {weather_description}.')\n \n except KeyError:\n #print('This data is not in the format expected') #change this to logging\n logging.exception(f' The data is not in the format expected - {forecast_data}')\n return 'Unknown'\n\n \nif __name__ == '__main__':\n main()", "sub_path": "forecast.py", "file_name": "forecast.py", "file_ext": "py", "file_size_in_byte": 2613, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 49, "usage_type": "call"}, 
{"api_name": "datetime.datetime.fromtimestamp", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "name"}, {"api_name": "logging.exception", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "317026839", "text": "from django.urls import path, reverse_lazy\nfrom django.views.generic import TemplateView\nfrom . import views\n\napp_name='ads'\nurlpatterns = [\n #path('', TemplateView.as_view(template_name='ads/main_menu.html'), name='main'),\n path('page1', TemplateView.as_view(template_name='ads/main_menu.html'), name='page1'),\n path('ads10', TemplateView.as_view(template_name='ads/main_menu.html'), name='ads10'),\n #path('', views.AdListView.as_view()),\n path('', views.AdListView.as_view(), name = 'main'),\n path('ads', views.AdListView.as_view(), name = 'all'),\n path('ad/', views.AdDetailView.as_view(), name = 'ad_detail'),\n path('ad/create', views.AdCreateView.as_view(), name = 'ad_create'),\n path('ad//update', views.AdUpdateView.as_view(), name = 'ad_update'),\n path('ad//delete', views.AdDeleteView.as_view(), name = 'ad_delete'),\n path('ad/ad_picture/', views.stream_file, name = 'ad_picture'),\n path('ad//comment', views.CommentCreateView.as_view(), name='ad_comment_create'),\n path('comment//delete', views.CommentDeleteView.as_view(success_url=reverse_lazy('ads:all')), name='ad_comment_delete'),\n path('ad//favorite', views.AddFavoriteView.as_view(), name='ad_favorite'), \n path('ad//unfavorite', views.DeleteFavoriteView.as_view(), name='ad_unfavorite'),\n]\n", "sub_path": "mysite/ads/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "583057148", "text": "from django.urls import path, re_path\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom materials import views\n\nurlpatterns = [\n path('textscore/', views.TextScore.as_view()),\n path('difficultwords/', views.DifficultWords.as_view()),\n path('handouts/', views.HandoutList.as_view()),\n 
path('handouts/<int:pk>/', views.HandoutDetail.as_view()),\n re_path(r'^definitions/$', views.Definitions.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n", "sub_path": "materials/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "materials.views.TextScore.as_view", "line_number": 6, "usage_type": "call"}, {"api_name": "materials.views.TextScore", "line_number": 6, "usage_type": "attribute"}, {"api_name": "materials.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "materials.views.DifficultWords.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "materials.views.DifficultWords", "line_number": 7, "usage_type": "attribute"}, {"api_name": "materials.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "materials.views.HandoutList.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "materials.views.HandoutList", "line_number": 8, "usage_type": "attribute"}, {"api_name": "materials.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "materials.views.HandoutDetail.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "materials.views.HandoutDetail", "line_number": 9, "usage_type": "attribute"}, {"api_name": "materials.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.re_path", "line_number": 10, "usage_type": "call"}, {"api_name": "materials.views.Definitions.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "materials.views.Definitions", "line_number": 10, "usage_type": "attribute"}, {"api_name": "materials.views", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.urlpatterns.format_suffix_patterns", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "416849261", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('master', '0002_inventaris_tgl_jt'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='inventaris',\n name='penyusutan_perbulan',\n field=models.DecimalField(max_digits=12, decimal_places=0),\n ),\n ]\n", "sub_path": "master/migrations/0003_auto_20161024_1541.py", "file_name": "0003_auto_20161024_1541.py", "file_ext": "py", "file_size_in_byte": 438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "20835829", "text": "\"\"\"\nModule testing code\n\"\"\"\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nimport matplotlib as mpl\n\nimport matplotlib.gridspec as 
gridspec\nimport matplotlib.pyplot as plt, mpld3\n\nfrom os import listdir\nfrom os.path import isfile, join\n\nsys.path.insert(0, os.path.abspath('/Users/xwang/Documents/Project/prj/public-prosper-projects/strategy-robot/'))\nfrom stratbot import StrategyRobot\n\nimport matplotlib.backends.backend_pdf\n\ndef make_top_variable_plot(top_variable_book):\n\n top_variables = [k for k in top_variable_book._transformed_variables.keys()]\n print(top_variables)\n\n figures_list = top_variable_book.make_control_plots(which_variables = top_variables,\n ylim = (0, 2),\n ylim_double_ratio = (0.5, 3.5),\n rounding = 2,\n canvas_size = (9, 5),\n label_test_group = '11/15 - 02/16',\n label_control_group = '08/15 - 10/15',\n output_dir = os.getcwd() + '/output/16plus_DQ_')\n\n pdf = matplotlib.backends.backend_pdf.PdfPages(\"top_variable_plot.pdf\")\n\n for fig in figures_list:\n pdf.savefig(fig)\n pdf.close()\n\n\ndef main():\n\n dev = pd.read_pickle(\"data_input.pkl\") \n #dev = pd.read_pickle(\"data_input_old.pkl\") \n #dev = pd.read_pickle(\"data_input_new.pkl\") \n test = dev\n\n features = [v for v in dev.columns.tolist() if v not in ['M2_16PlusDQ_Prin', 'state', 'group_label']] \n #features = ['FICO', 'PMI6_1']\n #features = ['BAC031_NumOpenBankcardTradesBalanceGT0ReptdLast6Mos', 'FICO']\n #features = features[300:]\n formula = 'M2_16PlusDQ_Prin / loanamount : loanamount | group_label ~ ' + ' + '.join(features)\n\n print(dev.shape)\n\n strb = StrategyRobot(dev, test, formula = formula, aggregate = False) \n\n book = strb.book_creation(nbins = 5,\n nbins_monotone = 5,\n monotone_sig_level_threshold = 0.4, \n yname = '16+ DQ at M2 (%)') \n\n #book.make_control_plots(which_variables = features,\n # ylim = (0, 2),\n # ylim_double_ratio = (0.5, 3.5),\n # rounding = 2,\n # canvas_size = (9, 5),\n # label_test_group = '11/15 - 02/16',\n # label_control_group = '08/15 - 10/15',\n # output_dir = os.getcwd() + '/output/16plus_DQ_')\n\n var_sel = strb.variable_selection(book)\n top_variable_book = var_sel.select_k_best(method = 'univariate',\n drop_correlated = True,\n drop_correlated_threshold = 0.6, \n k_best = 12,\n force_monotone = True,\n rounding = 1,\n output_dir = os.getcwd() + '/output/16plus_DQ_') \n\n make_top_variable_plot(top_variable_book)\n\n\n\n #optimize = strb.optimization(top_variable_book, \n # n_init_vars = 3, \n # eval_on_penalized = True, \n # patience = True)\n #optimize.search_rules(decrease_frac = 0.1,\n # stopping_eff = 0.02#,\n # #force_granularity_delta = 0.999\n # )\n #optimize.expand_rules(increase_frac = 0.1)\n #optimize.prune_rules(tolerate_n_std = 0.0)\n #optimize.select_rules()\n\n #optimize.performance_output(output_dir = os.getcwd() + '/output/UTD_15plus_DQ_')\n\n\n\nif __name__ == '__main__':\n main()\n\n\n", "sub_path": "tests/dq_uptick.py", "file_name": "dq_uptick.py", "file_ext": "py", "file_size_in_byte": 4087, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.path.insert", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.backends", "line_number": 37, "usage_type": "attribute"}, {"api_name": 
"pandas.read_pickle", "line_number": 46, "usage_type": "call"}, {"api_name": "stratbot.StrategyRobot", "line_number": 59, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "93871508", "text": "# -*- coding: UTF8 -*-\n\nimport locale\nimport os\nimport qgis.utils\nimport time\n\nfrom datetime import timedelta\nfrom functools import partial\n\nfrom qgis.core import *\nfrom qgis.gui import *\n\nfrom PyQt5 import QtCore, QtWidgets, uic\nfrom PyQt5.QtGui import QIcon, QPixmap\n\nfrom .q_dialog_next import QDialogNext\nfrom .reportes import Reportes\n\nFORM_CLASS, _ = uic.loadUiType(os.path.join(\n\tos.path.dirname(__file__), 'calendario.ui'))\n\nclass Calendario(QDialogNext, FORM_CLASS):\n\n\tdef __init__(self,online,sensor,parent=None):\n\t\t\"\"\"Constructor.\"\"\"\n\t\tsuper(Calendario, self).__init__(parent)\n\t\tself.setupUi(self)\n\t\tself.datos = dict()\n\t\tself.online = online\n\t\tself.setWindowTitle(\"Calendario de {}: sensor de {}\".format(sensor.grupoTexto, sensor.tipoSensorTexto.lower()))\n\t\tself.setMovable(self.kraken)\n\t\tself.setBotonCerrar(self.botonCerrar)\n\t\tself.sensor = sensor\n\t\tself.__cambiarContexto()\n\t\tself.barraDeProgreso.setVisible(False)\n\t\tif self.cambioDeMes(self.calendario.selectedDate().year(),self.calendario.selectedDate().month()):\n\t\t\tself.show()\n\t\tself.__signals()\n\n\tdef __signals(self):\n\t\tself.calendario.selectionChanged.connect(self.cambioDeDia)\n\t\tself.calendario.currentPageChanged.connect(self.cambioDeMes)\n\t\tself.botonDia.clicked.connect(self.generarReporteDelDia)\n\t\tself.botonMes.clicked.connect(self.generarReporteDelMes)\n\t\tself.botonPersonalizado.clicked.connect(self.__elegirFechas)\n\n\tdef cambioDeDia(self):\n\t\tself.totalDia.setText(\"%.2f\" % 0)\n\t\tself.maxDia.setText(\"%.2f\" % 0)\n\t\tyear = self.calendario.yearShown()\n\t\tmonth = self.calendario.monthShown()\n\t\tday = self.calendario.selectedDate().day()\n\t\tmes = \"%04d%02d01\" % (year,month)\n\t\tfecha = '%s%02d' % (mes[0:6],day)\n\t\tfor historial in self.datos[mes]['historial_del_dia']:\n\t\t\tif historial['fecha'] == fecha:\n\t\t\t\tif historial['minmax'] == '0':\n\t\t\t\t\tself.totalDia.setText(\"%.2f\" % float(historial['dato']))\n\t\t\t\telif historial['minmax'] == '1':\n\t\t\t\t\tself.maxDia.setText(\"%.2f\" % float(historial['dato']))\n\n\tdef cambioDeMes(self,year,month):\n\t\tself.totalMes.setText(\"%.2f\" % 0)\n\t\tself.maxMes.setText(\"%.2f\" % 0)\n\t\tmes = \"%04d%02d01\" % (year,month)\n\t\ttry:\n\t\t\tif mes not in self.datos:\n\t\t\t\tself.datos[mes] = self.online.consultarHistorial(self.sensor.idSensor,mes)\n\t\t\tfor historial in self.datos[mes]['historial_del_mes']:\n\t\t\t\tif historial['minmax'] == '0':\n\t\t\t\t\tself.totalMes.setText(\"%.2f\" % float(historial['dato']))\n\t\t\t\telif historial['minmax'] == '1':\n\t\t\t\t\tself.maxMes.setText(\"%.2f\" % float(historial['dato']))\n\t\t\tself.cambioDeDia()\n\t\t\treturn True\n\t\texcept TypeError:\n\t\t\tself.hide()\n\t\t\treturn False\n\n\tdef generarReporteDelDia(self):\n\t\treportes = Reportes(self.online)\n\t\tyear = self.calendario.yearShown()\n\t\tmonth = self.calendario.monthShown()\n\t\tday = self.calendario.selectedDate().day()\n\t\tfecha = '%04d%02d%02d' % (year,month,day)\n\t\tmes = \"%s01\" % fecha[0:6]\n\t\tdato = []\n\t\tfor historial in self.datos[mes]['historial_del_dia']:\n\t\t\tif historial['fecha'] == 
fecha:\n\t\t\t\tdato.append(float(historial['dato']))\n\t\ttry:\n\t\t\treportes.generarReporteDelDia(self.sensor,fecha,dato)\n\t\t\tself.labelEstado.setText(\"Almacenado satisfactoriamente en la ruta indicada\")\n\t\texcept IndexError:\n\t\t\tself.labelEstado.setText(\"No se generó el reporte\")\n\t\texcept PermissionError:\n\t\t\tself.labelEstado.setText(\"No se generó el reporte. Archivo ocupado\")\n\t\texcept FileNotFoundError:\n\t\t\tself.labelEstado.setText('')\n\n\tdef generarReporteDelMes(self):\n\t\treportes = Reportes(self.online)\n\t\tyear = self.calendario.yearShown()\n\t\tmonth = self.calendario.monthShown()\n\t\tmes = '%04d%02d01' % (year,month)\n\t\ttry:\n\t\t\treportes.generarReporteDelMes(self.sensor,mes,self.datos[mes])\n\t\t\tself.labelEstado.setText(\"Almacenado satisfactoriamente en la ruta indicada\")\n\t\texcept IndexError:\n\t\t\tself.labelEstado.setText(\"No se generó el reporte\")\n\t\texcept PermissionError:\n\t\t\tself.labelEstado.setText(\"No se generó el reporte. Archivo ocupado\")\n\t\texcept FileNotFoundError:\n\t\t\tself.labelEstado.setText('')\n\n\tdef cerrar(self):\n\t\tself.close()\n\n\tdef __cambiarContexto(self):\n\t\tif self.sensor.tipoSensor == 1:\n\t\t\tflag = True\n\t\telif self.sensor.tipoSensor == 2:\n\t\t\tflag = False\n\t\tif flag:\n\t\t\tself.labelTotalDia.setText(\"Presión mínima del día:\")\n\t\t\tself.totalDiaU.setText(\"mca\")\n\t\t\tself.labelTotalMes.setText(\"Presión mínima del mes:\")\n\t\t\tself.totalMesU.setText(\"mca\")\n\t\telse:\n\t\t\tself.labelTotalDia.setText(\"Total del día:\")\n\t\t\tself.totalDiaU.setText(\"m³\")\n\t\t\tself.labelTotalMes.setText(\"Total del mes:\")\n\t\t\tself.totalMesU.setText(\"m³\")\n\t\tself.labelMaxDia.setVisible(flag)\n\t\tself.maxDia.setVisible(flag)\n\t\tself.maxDiaU.setVisible(flag)\n\t\tself.labelMaxMes.setVisible(flag)\n\t\tself.maxMes.setVisible(flag)\n\t\tself.maxMesU.setVisible(flag)\n\n\tdef __elegirFechas(self):\n\t\tself.__bloquearInterfaz()\n\t\tself.labelEstado.setText(\"Elige el primer día\")\n\t\tself.calendario.selectionChanged.disconnect()\n\t\tself.calendario.clicked.connect(self.__elegirSegundoDia)\n\n\tdef __elegirSegundoDia(self):\n\t\tself.labelEstado.setText(\"Elige el último día\")\n\t\tfechaInicial = self.calendario.selectedDate().toPyDate()\n\t\tself.labelFechas.setText(fechaInicial.strftime(\"Del %d de %B de %Y al...\"))\n\t\tself.calendario.clicked.disconnect()\n\t\tself.calendario.clicked.connect(partial(self.generarReportePersonalizado,fechaInicial))\n\n\tdef generarReportePersonalizado(self,fechaInicial):\n\t\tfechaFinal = self.calendario.selectedDate().toPyDate()\n\t\tif fechaFinal > fechaInicial:\n\t\t\tself.labelFechas.setText('%s %s' % (fechaInicial.strftime(\"Del %d de %B de %Y al\"),fechaFinal.strftime(\"%d de %B de %Y\")))\n\t\t\treportes = Reportes(self.online)\n\t\t\ttry:\n\t\t\t\ttotalMeses = (fechaFinal.year - fechaInicial.year) * 12 + fechaFinal.month - fechaInicial.month\n\t\t\t\tmes = fechaInicial\n\t\t\t\tself.barraDeProgreso.setMaximum(totalMeses-1)\n\t\t\t\tself.barraDeProgreso.setVisible(True)\n\t\t\t\tfor i in range(0,totalMeses-1):\n\t\t\t\t\tmes = mes.replace(day=1) + timedelta(days = 32)\n\t\t\t\t\tmes = mes.replace(day=1)\n\t\t\t\t\tmesString = mes.strftime(\"%Y%m%d\")\n\t\t\t\t\tif mesString in self.datos:\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.datos[mesString] = 
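The loop in `generarReportePersonalizado` above advances month by month with a snap-to-day-1 / add-32-days / snap-again idiom. Isolated, the trick looks like this; it lands on the first of the next month regardless of month length:

```python
from datetime import date, timedelta

def first_of_next_month(d):
    # day 1 plus 32 days always overshoots into the next month,
    # whether the current month has 28, 29, 30 or 31 days
    return (d.replace(day=1) + timedelta(days=32)).replace(day=1)

print(first_of_next_month(date(2021, 1, 31)))  # 2021-02-01
print(first_of_next_month(date(2021, 12, 5)))  # 2022-01-01
```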
self.online.consultarHistorial(self.sensor.idSensor,mesString)\n\t\t\t\t\tself.barraDeProgreso.setValue(i+1)\n\t\t\t\tself.barraDeProgreso.setVisible(False)\n\t\t\t\treportes.generarReportePersonalizado(self.sensor,fechaInicial,fechaFinal,self.datos)\n\t\t\t\tself.labelFechas.setText('')\n\t\t\t\tself.labelEstado.setText(\"Almacenado satisfactoriamente en la ruta indicada\")\n\t\t\texcept IndexError:\n\t\t\t\tself.labelFechas.setText('')\n\t\t\t\tself.labelEstado.setText(\"No se generó el reporte\")\n\t\t\texcept PermissionError:\n\t\t\t\tself.labelFechas.setText('')\n\t\t\t\tself.labelEstado.setText(\"No se generó el reporte. Archivo ocupado\")\n\t\t\texcept FileNotFoundError:\n\t\t\t\tself.labelFechas.setText('')\n\t\t\t\tself.labelEstado.setText('')\n\t\telse:\n\t\t\tself.labelFechas.setText('')\n\t\t\tself.labelEstado.setText(\"No se generó el reporte. La fecha inicial tiene que ser menor que la final\")\n\t\tself.__bloquearInterfaz(False)\n\t\tself.calendario.clicked.disconnect()\n\t\tself.calendario.selectionChanged.connect(self.cambioDeDia)\n\n\tdef __bloquearInterfaz(self,flag=True):\n\t\tself.botonDia.setEnabled(not flag)\n\t\tself.botonMes.setEnabled(not flag)\n\t\tself.labelTotalDia.setEnabled(not flag)\n\t\tself.totalDia.setEnabled(not flag)\n\t\tself.totalDiaU.setEnabled(not flag)\n\t\tself.labelTotalMes.setEnabled(not flag)\n\t\tself.totalMes.setEnabled(not flag)\n\t\tself.totalMesU.setEnabled(not flag)\n\t\tself.labelMaxDia.setEnabled(not flag)\n\t\tself.maxDia.setEnabled(not flag)\n\t\tself.maxDiaU.setEnabled(not flag)\n\t\tself.labelMaxMes.setEnabled(not flag)\n\t\tself.maxMes.setEnabled(not flag)\n\t\tself.maxMesU.setEnabled(not flag)\n\t\ticon = QIcon()\n\t\tif flag:\n\t\t\ticon.addPixmap(QPixmap(':Calendario/icons/rep-cancelar.png'))\n\t\t\tself.botonPersonalizado.setToolTip(\"Cancelar\")\n\t\t\tself.botonPersonalizado.disconnect()\n\t\t\tself.botonPersonalizado.clicked.connect(self.__cancelar)\n\t\telse:\n\t\t\ticon.addPixmap(QPixmap(':Calendario/icons/rep-personalizable.png'))\n\t\t\tself.botonPersonalizado.disconnect()\n\t\t\tself.botonPersonalizado.clicked.connect(self.__elegirFechas)\n\t\tself.botonPersonalizado.setIcon(icon)\n\n\tdef __cancelar(self):\n\t\tself.labelEstado.setText('')\n\t\tself.labelFechas.setText('')\n\t\tself.botonPersonalizado.setToolTip(\"Generar reporte personalizado\")\n\t\tself.__bloquearInterfaz(False)\n\t\ttry:\n\t\t\tself.calendario.clicked.disconnect()\n\t\texcept TypeError:\n\t\t\tpass\n\t\tself.calendario.selectionChanged.connect(self.cambioDeDia)\n\t\tself.botonPersonalizado.disconnect()\n\t\tself.botonPersonalizado.clicked.connect(self.__elegirFechas)\n", "sub_path": "calendario.py", "file_name": "calendario.py", "file_ext": "py", "file_size_in_byte": 8428, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "PyQt5.uic.loadUiType", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "q_dialog_next.QDialogNext", "line_number": 23, "usage_type": "name"}, {"api_name": "reportes.Reportes", "line_number": 82, "usage_type": "call"}, {"api_name": "reportes.generarReporteDelDia", "line_number": 93, "usage_type": 
"call"}, {"api_name": "reportes.Reportes", "line_number": 103, "usage_type": "call"}, {"api_name": "reportes.generarReporteDelMes", "line_number": 108, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 153, "usage_type": "call"}, {"api_name": "reportes.Reportes", "line_number": 159, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 166, "usage_type": "call"}, {"api_name": "reportes.generarReportePersonalizado", "line_number": 175, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 209, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 211, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "194864652", "text": "from django.conf.urls import include, url\n\nfrom content import url_scheme\nfrom events import views\n\nurlpatterns = [\n url(r\"^$\", views.EventIndex.as_view(), name=\"event_index\"),\n url(r\"^dodaj/$\", views.AddExternalEvent.as_view(), name=\"add_external_event\"),\n url(\n r\"^(?P\\d+)/edytuj\",\n views.EditExternalEvent.as_view(),\n name=\"edit_external_event\",\n ),\n url(\n r\"^(?P\\d+)/usun\",\n views.DeleteExternalEvent.as_view(),\n name=\"delete_external_event\",\n ),\n]\n", "sub_path": "events/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "events.views.EventIndex.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "events.views.EventIndex", "line_number": 7, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "events.views.AddExternalEvent.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "events.views.AddExternalEvent", "line_number": 8, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "events.views.EditExternalEvent.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "events.views.EditExternalEvent", "line_number": 11, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "events.views.DeleteExternalEvent.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "events.views.DeleteExternalEvent", "line_number": 16, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "110966588", "text": "import matplotlib.pyplot as mplt\nimport math\n\ndata1 = open(\"disney_plus_titles.csv\", \"r\", encoding=\"utf8\")\ndata = data1.read()\ndata1.close()\n\ndata1 = data.split(\"\\n\")\ndatos = data1.pop(0)\ndata = []\n\nfor i in data1:\n a = i.split(',')\n data.append(a)\n\ndata.pop(-1)\n\n# funciones necesarias para crear la lista con todos los datos\n\n\ndef crear_listapop(lista, valor1, valor2):\n lista_pop = []\n for i in range(len(lista)):\n if valor1 <= i <= valor2:\n lista_pop.append(i + 1)\n return lista_pop\n\n\ndef crear_lista_datos_agrupados_en_comillas(lista, valor1, valor2):\n listax = []\n for i in range(len(lista)):\n if valor1 <= i <= valor2:\n a = lista[i]\n a = 
limpiar_comillas(a)\n listax.append(a)\n return listax\n\n\ndef popearx(lista, listapop):\n decontador = 0\n for i in listapop:\n lista.pop(i - decontador)\n decontador += 1\n\n\ndef limpiar_comillas(string):\n if string[0] == '\\\"':\n string = string[1:]\n if string[-1] == '\\\"':\n string = string[:-1]\n return(string)\n\n\ndef listar_comillas(lista):\n es_lista = False\n paso = False\n for i in range(len(lista)):\n if paso == False:\n if es_lista == False:\n contador_lista = 0\n contador_lista2 = 0\n\n if len(lista[i]) > 0:\n if lista[i][0] == '\\\"':\n contador_lista = i\n es_lista = True\n\n elif lista[i][-1] == '\\\"':\n contador_lista2 = i\n es_lista = False\n listax = crear_lista_datos_agrupados_en_comillas(\n lista, contador_lista, contador_lista2)\n lista_pop = crear_listapop(\n lista, contador_lista, contador_lista2)\n lista.insert(contador_lista, listax)\n popearx(lista, lista_pop)\n paso = True\n\n\nfor j in range(len(data)):\n for i in range(11):\n listar_comillas(data[j])\n\ndatos = datos.split(\",\")\n\n# hasta aca funciona y cree 2 listas, data es toda la informacion del dataset\n# y datos dice a que se refiere cada dato de cada pelicula,\n# ej: nombre, año de lanzamiento, etc...\n\npeliculas = []\nfor i in data:\n if i[1] == 'Movie':\n peliculas.append(i)\n\naños_agregados = []\naños_agregados_i = []\nfor i in range(len(peliculas)):\n if len(peliculas[i][6]) > 0:\n año_agregado = peliculas[i][6][1]\n año_agregado = int(año_agregado[1:])\n años_agregados.append(año_agregado)\n años_agregados_i.append(i)\n\naños_creados = []\nfor i in range(len(peliculas)):\n años_creado = int(peliculas[i][7])\n if i in años_agregados_i:\n años_creados.append(años_creado)\n\nduracion_peliculas = []\nfor i in range(len(peliculas)):\n duracion = peliculas[i][9]\n duracion = int(duracion[:-4])\n if i in años_agregados_i:\n duracion_peliculas.append(duracion)\n\ndiferencia_años = []\nfor i in range(len(años_agregados)):\n diferencia_años.append(años_agregados[i] - años_creados[i])\n\ndiferencia_duracion = []\nfor i in range(len(diferencia_años)):\n diferencia_duracion.append([diferencia_años[i], duracion_peliculas[i]])\n\n\ndef por_años(list):\n return(list[0])\n\n\ndiferencia_duracion.sort(key=por_años)\n\ncantidad_intervalos = 1+3.22*math.log(len(diferencia_años), 10)\n\nintervalos = [0, 9, 18, 27, 36, 45, 54, 63, 72, 81, 90, 99]\npromedio_duracion = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\nfor i in range(1, 12):\n suma_intervalo = 0\n cantidad_intervalo = 0\n for j in diferencia_duracion:\n if intervalos[i - 1] <= j[0] < intervalos[i]:\n suma_intervalo += j[1]\n cantidad_intervalo += 1\n promedio_duracion[i - 1] = suma_intervalo/cantidad_intervalo\n\nintervalos_años = [9, 18, 27, 36, 45, 54, 63, 72, 81, 90, 99]\n\n\nfig, ax = mplt.subplots()\nax.plot(intervalos_años, promedio_duracion)\nmplt.show()\n", "sub_path": "P3.py", "file_name": "P3.py", "file_ext": "py", "file_size_in_byte": 3960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "math.log", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}]} +{"seq_id": "503465554", "text": "from __future__ import division\nimport numpy as np\nfrom PyQt4.QtCore import 
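The Disney script above sizes its histogram with `1 + 3.22 * log10(n)`, a rounded form of Sturges' rule (the usual constant is 3.322, i.e. 1/log10(2)). Standalone:

```python
import math

def sturges_bins(n):
    # Sturges' rule: bins grow logarithmically with sample size
    return math.ceil(1 + 3.322 * math.log(n, 10))

for n in (50, 500, 5000):
    print(n, "->", sturges_bins(n))  # 7, 10, 14
```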
pyqtSignal as Signal\nfrom vispy import app\nfrom vispy import gloo\nfrom vispy.visuals.shaders import ModularProgram\nfrom vispy.visuals import Visual, LinePlotVisual, LineVisual, PolygonVisual\nfrom vispy.visuals.transforms import (STTransform, LogTransform,\n TransformSystem, ChainTransform)\nfrom pyqtgraph import QtGui, QtCore\nimport pyqtgraph as pg\nimport pyqtgraph.opengl as gl\nfrom Points import *\nfrom Visuals import *\nfrom file_ import *\n\nclass Canvas(QtCore.QObject, app.Canvas):\n roiCreated = Signal(object)\n roiDeleted = Signal(object)\n def __init__(self):\n QtCore.QObject.__init__(self)\n app.Canvas.__init__(self, keys='interactive', resizable=False)\n ps = self.pixel_scale\n\n self.roi_visuals = []\n self.current_roi = None\n self.finished = False\n self.drawing_roi = False\n\n self.markers = []\n self.panzoom = STTransform(scale=(1, 1), translate=(0, 0))\n self.transform = ChainTransform([self.panzoom,\n STTransform(scale=[1, 1], translate=[1,1]),\n LogTransform(base=(0, 0, 0))])\n\n self.tr_sys = TransformSystem(self)\n self.tr_sys.visual_to_document = self.transform\n\n gloo.set_state(blend=True,\n blend_func=('src_alpha', 'one_minus_src_alpha'))\n\n self.timer = app.Timer('auto', connect=self.on_timer, start=True)\n #self.native.setFixedSize(800, 600)\n\n def on_timer(self, event):\n self.update()\n\n def on_mouse_release(self, event):\n if event.button == 2 and self.drawing_roi:\n self.drawing_roi = False\n self.current_roi.draw_finished()\n self.roi_visuals.append(self.current_roi)\n self.current_roi.menu.addAction(QtGui.QAction('Export ROIs', self.current_roi.menu, triggered=lambda : save_file_gui(self.export_rois, prompt='Export ROIs to text file', filetypes='Text Files (*.txt)')))\n self.current_roi.menu.addAction(QtGui.QAction('Import ROIs', self.current_roi.menu, triggered=lambda : open_file_gui(self.import_rois, prompt='Import ROIs from text file', filetypes='Text Files (*.txt)')))\n self.current_roi.select()\n self.roiCreated.emit(self.current_roi)\n elif any([roi.hover for roi in self.roi_visuals]) and event.button == 2 and not self.drawing_roi and event.last_event.type == 'mouse_press':\n self.current_roi.contextMenuEvent(self.native.mapToGlobal(QtCore.QPoint(*event.pos)))\n for roi in self.roi_visuals:\n if roi.selected:\n roi.finish_translate()\n \n def export_rois(self, fname):\n roi_strs = [repr(roi) for roi in self.roi_visuals]\n with open(fname, 'w') as outf:\n outf.write('\\n'.join(roi_strs))\n\n def import_rois(self, fname):\n rois = ROIVisual.importROIs(fname)\n for roi in rois:\n while roi.id in [r.id for r in self.roi_visuals]:\n roi.setId(roi.id + 1)\n self.roi_visuals.append(roi)\n\n def translatedPoint(self, pos):\n return np.array([(pos[0] - self.panzoom.translate[0]) / self.panzoom.scale[0], (pos[1] - self.panzoom.translate[1]) / self.panzoom.scale[1]])\n\n def on_mouse_press(self, event):\n if self.drawing_roi:\n return\n if 'Control' in event.modifiers:\n for roi in self.roi_visuals:\n if roi.contains(self.translatedPoint(event.pos)):\n if roi.selected:\n roi.deselect()\n else:\n roi.select()\n else:\n self.current_roi = None\n for roi in self.roi_visuals:\n if roi.mouseIsOver(self.translatedPoint(event.pos)):\n self.current_roi = roi\n roi.select()\n else:\n roi.deselect()\n\n def on_key_press(self, event):\n if event.key == 'a' and 'Control' in event.modifiers:\n for roi in self.roi_visuals:\n roi.select()\n elif event.key == 'Delete':\n for roi in self.roi_visuals[:]:\n if roi.selected:\n self.delete_roi(roi)\n \n def remove_roi(self, 
roi):\n if self.current_roi == roi:\n self.current_roi = None\n self.roi_visuals.remove(roi)\n self.roiDeleted.emit(roi)\n\n\n def on_mouse_move(self, event):\n pos = self.translatedPoint(event.pos)\n for roi in self.roi_visuals:\n if not self.drawing_roi:\n if roi.mouseIsOver(pos):\n self.current_roi = roi\n if event.is_dragging:\n dxy = self.translatedPoint(event.pos) - self.translatedPoint(event.last_event.pos)\n button = event.press_event.button\n if button == 1:\n self.panzoom.move(event.pos - event.last_event.pos)\n elif button == 2:\n if not self.drawing_roi and any([roi.mouseIsOver(pos) for roi in self.roi_visuals]):\n for roi in self.roi_visuals:\n if roi.selected:\n roi.translate(dxy)\n elif self.drawing_roi == True:\n self.current_roi.extend(pos)\n else:\n for roi in self.roi_visuals:\n roi.deselect()\n new_id = 1\n while new_id in [roi.id for roi in self.roi_visuals]:\n new_id += 1\n self.current_roi = ROIVisual(new_id, pos)\n self.drawing_roi = True\n self.update()\n\n def on_mouse_wheel(self, event):\n center = event.pos\n dz = event.delta[1]\n self.panzoom.zoom(np.exp(np.array([.1, .1]) * dz), center)\n\n def on_resize(self, event):\n self.width, self.height = event.size\n gloo.set_viewport(0, 0, self.width, self.height)\n\n def on_draw(self, event):\n gloo.clear()\n for ch in self.markers:\n ch.draw(self.tr_sys)\n\n for roi in self.roi_visuals:\n roi.draw(self.tr_sys)\n \n if self.current_roi != None:\n self.current_roi.draw(self.tr_sys)\n ", "sub_path": "Canvas2D.py", "file_name": "Canvas2D.py", "file_ext": "py", "file_size_in_byte": 6381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pyqtgraph.QtCore.QObject", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtCore", "line_number": 17, "usage_type": "name"}, {"api_name": "vispy.app.Canvas", "line_number": 17, "usage_type": "attribute"}, {"api_name": "vispy.app", "line_number": 17, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.pyqtSignal", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.pyqtSignal", "line_number": 19, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore.QObject.__init__", "line_number": 21, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore.QObject", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtCore", "line_number": 21, "usage_type": "name"}, {"api_name": "vispy.app.Canvas.__init__", "line_number": 22, "usage_type": "call"}, {"api_name": "vispy.app.Canvas", "line_number": 22, "usage_type": "attribute"}, {"api_name": "vispy.app", "line_number": 22, "usage_type": "name"}, {"api_name": "vispy.visuals.transforms.STTransform", "line_number": 31, "usage_type": "call"}, {"api_name": "vispy.visuals.transforms.ChainTransform", "line_number": 32, "usage_type": "call"}, {"api_name": "vispy.visuals.transforms.STTransform", "line_number": 33, "usage_type": "call"}, {"api_name": "vispy.visuals.transforms.LogTransform", "line_number": 34, "usage_type": "call"}, {"api_name": "vispy.visuals.transforms.TransformSystem", "line_number": 36, "usage_type": "call"}, {"api_name": "vispy.gloo.set_state", "line_number": 39, "usage_type": "call"}, {"api_name": "vispy.gloo", "line_number": 39, "usage_type": "name"}, {"api_name": "vispy.app.Timer", "line_number": 42, "usage_type": "call"}, {"api_name": "vispy.app", "line_number": 42, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QAction", "line_number": 53, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", 
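`on_mouse_wheel` above converts additive wheel ticks into a multiplicative zoom with `np.exp(0.1 * dz)`, so equal and opposite ticks cancel exactly. The factor in isolation:

```python
import numpy as np

k = 0.1
for dz in (+1, -1, +3):
    print(dz, np.exp(np.array([k, k]) * dz))  # per-axis zoom factors
# exp(0.1) * exp(-0.1) == 1: one tick in followed by one tick out
# restores the original scale exactly
```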
"line_number": 53, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QAction", "line_number": 54, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 54, "usage_type": "name"}, {"api_name": "pyqtgraph.QtCore.QPoint", "line_number": 58, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore", "line_number": 58, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "vispy.gloo.set_viewport", "line_number": 148, "usage_type": "call"}, {"api_name": "vispy.gloo", "line_number": 148, "usage_type": "name"}, {"api_name": "vispy.gloo.clear", "line_number": 151, "usage_type": "call"}, {"api_name": "vispy.gloo", "line_number": 151, "usage_type": "name"}]} +{"seq_id": "66757592", "text": "import logging\nimport pickle\nimport json\nimport numpy as np\nimport cdl\nimport json\nimport argparse\nimport math\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import accuracy_score\nfrom keras.utils import plot_model\nfrom keras.layers import Activation, LeakyReLU\nfrom keras import optimizers\nfrom keras import callbacks\nfrom utils import custom_load, save_result\n\ndef bagging_predict(models, X):\n pred_Y = np.zeros(X.shape[0])\n for model in models:\n pred_y = model.predict(X)\n pred_Y = np.add(pred_Y, pred_y)\n return pred_Y > len(models) // 2\n\ndef main(config):\n logging.info('Reading data')\n X, Y, user_feature, item_feature, pred_X = custom_load(**config)\n\n num_user_feature = user_feature.shape[1] # 13\n num_item_feature = item_feature.shape[1] # 11\n\n testing_acc = []\n training_acc = []\n bagging_history = []\n n_estimater = 5\n k_fold = 5\n\n for i, (train_index, test_index,) in enumerate(KFold(n_splits=k_fold, shuffle=True).split(X)):\n X_train, X_test = X[train_index], X[test_index]\n Y_train, Y_test = Y[train_index], Y[test_index]\n\n bagging_models = []\n bagging_original_train_acc = []\n bagging_original_test_acc = []\n for i in range(n_estimater):\n smaple_index = np.random.choice(X_train.shape[0], size=X_train.shape[0])\n model = cdl.CDL(\n user_feature, item_feature,\n user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature],\n item_layer_dim=[num_item_feature, 7, num_item_feature],\n lamda_u=32,\n lamda_w=0.1344613232775228,\n lamda_v=0.27891447944038594,\n encoder_noise=0.2,\n dropout_rate=0.4,\n activation='selu')\n\n model.fit(\n X_train[smaple_index], Y_train[smaple_index],\n X_test, Y_test,\n lamda_n=1.2001027195781027,\n lamda_m=2.489385178928047,\n lamda_c=47.80028677294353,\n batch_size=150,\n epochs=300,\n optimizer=optimizers.Adam(lr=0.001),\n callbacks=[callbacks.EarlyStopping(\n monitor='val_loss',\n min_delta=0,\n patience=10,\n verbose=0,\n mode='auto')]\n )\n\n bagging_original_train_acc.append(model.get_accuracy(X_train, Y_train))\n bagging_original_test_acc.append(model.get_accuracy(X_test, Y_test))\n bagging_models.append(model)\n\n training_acc.append(accuracy_score(Y_train, bagging_predict(bagging_models, X_train)))\n testing_acc.append(accuracy_score(Y_test, bagging_predict(bagging_models, X_test)))\n bagging_history.append({\n 'bagging_original_train_acc': bagging_original_train_acc,\n 'bagging_original_test_acc': bagging_original_test_acc,\n })\n logging.info(\"Accuracy: %f\" % testing_acc[-1])\n\n for i in range(k_fold):\n logging.info(\"%d-fold Training Accuracy: [%s]\" % (i, 
bagging_history[i]['bagging_original_train_acc']))\n logging.info(\"%d-fold Testing Accuracy: [%s]\" % (i, bagging_history[i]['bagging_original_test_acc']))\n logging.info(\"%d-fold Training bagging result: %f\" % (i, training_acc[i]))\n logging.info(\"%d-fold Testing bagging result: %f\" % (i, testing_acc[i]))\n\ndef save_model(config):\n\n X, Y, users, items, pred_X = custom_load(**config)\n\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=1)\n\n num_user_feature = users.shape[1] # 13\n num_item_feature = items.shape[1] # 11\n\n model = cdl.CDL(\n users, items,\n user_layer_dim=[num_user_feature, 10, 7, 10, num_user_feature],\n item_layer_dim=[num_item_feature, 7, num_item_feature],\n lamda_w=5, # regula\n lamda_u=5, # User offset regu\n lamda_v=5, # Item offset regu\n encoder_noise=0.2,\n dropout_rate=0.4,\n activation=LeakyReLU(0.3))\n\n logging.info(\"Autoencoder init Entropy: %s\" % str(model.get_autoencoder_loss()))\n \n model.fit(\n X_train, Y_train,\n X_test, Y_test,\n lamda_n=5, # Item decode loss\n lamda_m=5, # User decode loss\n lamda_c=10, # Predict loss\n batch_size=32,\n epochs=300,\n optimizer=optimizers.Adam(lr=0.001),\n # callbacks=[callbacks.EarlyStopping(\n # monitor='val_loss',\n # min_delta=0,\n # patience=5,\n # verbose=1,\n # mode='auto')]\n )\n\n logging.info(\"Autoencoder final Entropy: %s\" % str(model.get_autoencoder_loss()))\n logging.info(\"Accuracy: %f\" % model.get_accuracy(X_test, Y_test))\n model.get_model().save('./models/CDL-%f.h5' % model.get_accuracy(X_test, Y_test))\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')\n parser = argparse.ArgumentParser(description='PTT Crawer')\n parser.add_argument('-c', '--config', type=str, default=\"./config.json\")\n parser.add_argument('-s', '--save_model', action='store_true')\n args = parser.parse_args()\n with open(args.config, 'r') as config_file:\n config = json.loads(config_file.read())\n if args.save_model:\n save_model(config)\n else:\n main(config)", "sub_path": "Assignment-4/src/bagging_pure.py", "file_name": "bagging_pure.py", "file_ext": "py", "file_size_in_byte": 5524, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.custom_load", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cdl.CDL", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 66, "usage_type": "name"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 67, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 80, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 88, "usage_type": "call"}, {"api_name": 
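Each estimator in the training loop earlier in this record sees a bootstrap sample: `np.random.choice(n, size=n)` draws row indices with replacement, so roughly 63% of distinct rows appear per draw. A minimal version, using the newer `Generator` API rather than the record's legacy `np.random.choice`:

```python
import numpy as np

rng = np.random.default_rng(0)
n = 10
idx = rng.choice(n, size=n)      # sampling with replacement by default
print(idx)
print(len(set(idx.tolist())), "distinct rows out of", n)
```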
"logging.info", "line_number": 89, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 91, "usage_type": "call"}, {"api_name": "utils.custom_load", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 97, "usage_type": "call"}, {"api_name": "cdl.CDL", "line_number": 102, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 111, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 123, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 123, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 132, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 133, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 137, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 137, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 138, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "277918118", "text": "# encoding: utf-8\nimport telegram\nimport db_controller\nfrom telegram.ext import Updater, CallbackQueryHandler, CommandHandler\nfrom telegram import InlineKeyboardButton,InlineKeyboardMarkup\n\n#MSG DEFINED\nREGISTER_REPLY_WRONG = \"Ya está registrado, no debe usar mas este comando\"\nREGISTER_REPLY_GOOD = \"Se acaba de registrar {}\"\nUSER_NOT_REGISTERED = \"Introduzca /start para crear una cuenta antes de hacer nada\"\nCOMPRAR_REPLY = \"Elige que quieres comprar:\"\nMETE_DINERO_ARG_ERROR = \"Argumentos invalidos, el formato de los comandos es \\mete_dinero dinero\"\nMETE_DINERO_REPLY = \"TIENES {}€\"\nDINERO_REPLY = \"TIENES {}€\"\nAYUDA_REPLY = '''Hola, este es el bot de gestion de la nevera de la onda\nSi no pertenece a la onda y ha encontrado el bot, por favor ignorelo\nSi es la primera vez que ejecuta el bot, por favor introduzca /start para crear un usuario\nDispone de los siguientes comandos:\n/comprar despliega un menu con las opciones a comprar\n /dinero muestra su saldo\n/metedinero dinero añade dinero a su saldo\nNo tiene limite negativo en el saldo,pero sea consecuente con las deudas\n '''\n\n#Callback Functions\ndef start(bot,update):\n #bot.send_message(chat_id=update.message.chat_id, text=\"Arrancado\")\n usuario = update.message.from_user\n if db_controller.is_registered(usuario['id']):\n bot.send_message(chat_id=update.message.chat_id,text=REGISTER_REPLY_WRONG)\n return\n db_controller.crear_usuario(usuario['id'])\n bot.send_message(chat_id=update.message.chat_id,text=REGISTER_REPLY_GOOD.format(usuario['username']))\n \n \ndef comprar(bot,update,user_data):\n usuario = update.message.from_user\n if not db_controller.is_registered(usuario['id']):\n bot.send_message(chat_id=update.message.chat_id,text=USER_NOT_REGISTERED)\n return\n keyboard = gen_productos_keyboard()\n reply_markup = InlineKeyboardMarkup(keyboard)\n update.message.reply_text(COMPRAR_REPLY, reply_markup=reply_markup) \n\ndef mete_dinero(bot,update,args):\n usuario = update.message.from_user\n if not db_controller.is_registered(usuario['id']):\n bot.send_message(chat_id=update.message.chat_id,text=USER_NOT_REGISTERED)\n return\n print(str(args))\n if len(args) != 1:\n bot.send_message(chat_id=update.message.chat_id,text=METE_DINERO_ARG_ERROR)\n return\n usuario = update.message.from_user\n 
user_id = usuario['id']\n cantidad = float(args[0])\n dinero = db_controller.add_dinero(user_id,cantidad)\n bot.send_message(chat_id=update.message.chat_id,text=METE_DINERO_REPLY.format(dinero))\n\ndef dinero(bot,update):\n usuario = update.message.from_user\n if not db_controller.is_registered(usuario['id']):\n bot.send_message(chat_id=update.message.chat_id,text=USER_NOT_REGISTERED)\n return\n user_id = usuario['id']\n bot.send_message(chat_id=update.message.chat_id,text=DINERO_REPLY.format(db_controller.get_dinero(user_id)))\n\ndef ayuda(bot,update):\n bot.send_message(chat_id=update.message.chat_id,text=AYUDA_REPLY)\n\n#Callback query\n\ndef button(bot,update,user_data):\n query = update.callback_query\n print(user_data)\n usuario = query.from_user\n print(\"MI ID QUERY ES: \"+str(usuario['id']))\n money = db_controller.comprar(usuario['id'],query.data)\n bot.edit_message_text(chat_id=query.message.chat_id,message_id=query.message.message_id,text=\"Opcion: {} \\n Le quedan {} euros\".format(query.data,money))\n\n#Aux\n\ndef gen_productos_keyboard():\n lista_productos = db_controller.request_products()\n keyboard = []\n for elm in lista_productos:\n #print(elm)\n keyboard.append([InlineKeyboardButton(str(elm),callback_data=str(elm))])\n #print(keyboard)\n return keyboard\n", "sub_path": "bot_telegram_callback.py", "file_name": "bot_telegram_callback.py", "file_ext": "py", "file_size_in_byte": 3722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "db_controller.is_registered", "line_number": 29, "usage_type": "call"}, {"api_name": "db_controller.crear_usuario", "line_number": 32, "usage_type": "call"}, {"api_name": "db_controller.is_registered", "line_number": 38, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 42, "usage_type": "call"}, {"api_name": "db_controller.is_registered", "line_number": 47, "usage_type": "call"}, {"api_name": "db_controller.add_dinero", "line_number": 57, "usage_type": "call"}, {"api_name": "db_controller.is_registered", "line_number": 62, "usage_type": "call"}, {"api_name": "db_controller.get_dinero", "line_number": 66, "usage_type": "call"}, {"api_name": "db_controller.comprar", "line_number": 78, "usage_type": "call"}, {"api_name": "db_controller.request_products", "line_number": 84, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "535816993", "text": "import json\ndata='''\n[\n {\n \"name\" : \"Chuck\",\n \"id\" : \"2\",\n \"x\" : \"3\"\n },\n {\n \"name\" : \"Harsh\",\n \"id\" : \"3\",\n \"x\" : \"7\"\n }\n]'''\n\ninfo=json.loads(data)\nfor item in info:\n print(\"Name: \",item[\"name\"])\n print(\"X: \",item[\"x\"])\n print(\"ID: \",item[\"id\"])\n", "sub_path": "course3/json_.py", "file_name": "json_.py", "file_ext": "py", "file_size_in_byte": 319, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "json.loads", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "557730876", "text": "import time\n\nimport dateutil.parser\nimport numpy as np\nfrom bitcoin.models import ActualPrice, AvgPrice, PriceTable\nfrom dateutil import tz\nfrom django.core.management.base import BaseCommand\n\nfrom ._api import *\n\n\nclass Command(BaseCommand):\n help = 'Downloads actual rates for currency'\n\n def handle(self, *args, **options):\n # download actual bitcoin price\n hand = getProductTicker(url)\n from_zone = 
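`gen_productos_keyboard` above builds the list-of-rows structure that `InlineKeyboardMarkup` expects, one single-button row per product. Condensed, with made-up product names:

```python
from telegram import InlineKeyboardButton, InlineKeyboardMarkup

productos = ["agua", "cola", "cafe"]     # hypothetical product list
keyboard = [[InlineKeyboardButton(str(p), callback_data=str(p))]
            for p in productos]          # one row per product
reply_markup = InlineKeyboardMarkup(keyboard)
```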
tz.tzutc()\n to_zone = tz.gettz('Europe/Warsaw')\n utc = dateutil.parser.parse(hand['time'])\n utc = utc.replace(tzinfo=from_zone)\n local = utc.astimezone(to_zone)\n\n new_record = ActualPrice(date=str(local)[:-13], price=hand['ask'])\n new_record.save()\n\n # avg bitcoin price from 10 minutes\n res = getProductHistoricRates(url, start, end, 2)\n rates = []\n for i in res:\n rates.append(i[1])\n rates.append(i[2])\n avg = \"%.2f\" % np.mean(rates)\n\n new_rec = AvgPrice(avg_price=avg)\n new_rec.save()\n\n # table of max and min prices of bitcoin from 10 minutes\n result = getProductHistoricRates(url, start, end, 60)\n for i in result:\n date = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(i[0]))\n new = PriceTable(date=date, min_price=\"%.2f\" % i[1], max_price=\"%.2f\" % i[2])\n new.save()\n self.stdout.write('\\033[02;32m' + \"Data is downloaded and stored in database :)\" + '\\033[00m', ending='\\n')\n", "sub_path": "mysite/bitcoin/management/commands/download_rates.py", "file_name": "download_rates.py", "file_ext": "py", "file_size_in_byte": 1489, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 12, "usage_type": "name"}, {"api_name": "dateutil.tz.tzutc", "line_number": 18, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 18, "usage_type": "name"}, {"api_name": "dateutil.tz.gettz", "line_number": 19, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 19, "usage_type": "name"}, {"api_name": "dateutil.parser.parser.parse", "line_number": 20, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 20, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 20, "usage_type": "name"}, {"api_name": "bitcoin.models.ActualPrice", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 33, "usage_type": "call"}, {"api_name": "bitcoin.models.AvgPrice", "line_number": 35, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 41, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 41, "usage_type": "call"}, {"api_name": "bitcoin.models.PriceTable", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "549407501", "text": "\"\"\"\n\n--------Function Details-----------\n\nfunction recive Recive: File Hand + page Url that it should scrap from\n\nfunction return : 1 for sucsess / -1 for fail\n\n----------Function Description-----------\n\nThis function call each time with diffrent website Url and return 1 if succeded and -1 if not\n\nIts insett a data to the relevent page.\n\n? - Target of this page is to get the relevan part of the page.\n\n? 
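The `handle()` method above parses an API timestamp, pins it to UTC and converts it to Europe/Warsaw. The same conversion on a fixed timestamp, which parses as timezone-aware directly because of the trailing `Z`:

```python
import dateutil.parser
from dateutil import tz

utc_dt = dateutil.parser.parse("2021-03-01T12:00:00Z")   # tz-aware (UTC)
local = utc_dt.astimezone(tz.gettz("Europe/Warsaw"))
print(local.isoformat())  # 2021-03-01T13:00:00+01:00
```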
- Title / Description / H1 / Keywords / Content / Images if there are.\n\n* - The content must be clean from any other non-releven objects\n\n* - Images should be inserted to a new folder that will contain all the website images\n\n\n\"\"\"\n\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndef scrap_page(sample_url):\n \n page = requests.get(sample_url)\n\n\n #print(page.text)\n\n soup = BeautifulSoup(page.text, 'html.parser')\n\n print(\"--------script running----------------\\n\")\n\n\n print(soup.prettify())\n\n # print(soup.title)\n #print(soup.h1)\n\n #keywords = soup.findAll(\"meta\", property=\"keywords\")\n #keywords = soup.find(name=\"keywords\")\n #title = soup.find(\"meta\", property=\"og:title\")\n\n\n\n #print(keywords)\n\n\n\n\n#Moudlue main for testing only\nif __name__ == '__main__':\n\n \n\n sample_url = \"http://www.ask-tal.co.il/%D7%91%D7%9C%D7%A2%D7%93%D7%99%D7%95%D7%AA\"\n \n \n scrap_page(sample_url)\n\n\n", "sub_path": "soup_scrap.py", "file_name": "soup_scrap.py", "file_ext": "py", "file_size_in_byte": 1338, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "245431556", "text": "from .config import DEVICE, DEFAULT_CONFIG\nfrom .models.encoder import Encoder\nfrom .models.decoder import Decoder\nfrom .models.seq2seq import Seq2Seq\nfrom ...base.model import BaseConfig, BaseModel\n\n\nclass CBConfig(BaseConfig):\n def __init__(self, word_vocab, vector_path, **kwargs):\n super(CBConfig, self).__init__()\n for name, value in DEFAULT_CONFIG.items():\n setattr(self, name, value)\n self.word_vocab = word_vocab\n self.vocabulary_size = len(self.word_vocab)\n self.vector_path = vector_path\n for name, value in kwargs.items():\n setattr(self, name, value)\n\n\nclass CBSeq2Seq(BaseModel):\n def __init__(self, args):\n super(CBSeq2Seq, self).__init__(args)\n self.args = args\n self.hidden_dim = args.embedding_dim\n self.vocabulary_size = args.vocabulary_size\n self.batch_size = args.batch_size\n self.save_path = args.save_path\n self.num_layers = args.num_layers\n self.dropout = args.dropout\n self.teacher_forcing_ratio = args.teacher_forcing_ratio\n\n vocabulary_size = args.vocabulary_size\n embedding_dimension = args.embedding_dim\n\n encoder = Encoder(vocabulary_size, embedding_dimension, self.hidden_dim, self.num_layers,\n self.dropout).to(DEVICE)\n decoder = Decoder(self.hidden_dim, embedding_dimension, vocabulary_size, self.num_layers, self.dropout,\n args.method).to(DEVICE)\n self.seq2seq = Seq2Seq(encoder, decoder).to(DEVICE)\n\n def forward(self, src, trg, teacher_forcing_ratio=0.5):\n return self.seq2seq(src, trg, teacher_forcing_ratio)\n\n def predict(self, src, src_lens, sos, max_len):\n return self.seq2seq.predict(src, src_lens, sos, max_len)\n", "sub_path": "lightnlp/tg/cb/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1790, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "base.model.BaseConfig", "line_number": 8, "usage_type": "name"}, {"api_name": "config.DEFAULT_CONFIG.items", "line_number": 11, "usage_type": "call"}, {"api_name": "config.DEFAULT_CONFIG", "line_number": 11, "usage_type": "name"}, {"api_name": "base.model.BaseModel", "line_number": 20, "usage_type": "name"}, {"api_name": "config.DEVICE", "line_number": 36, "usage_type": 
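The scraper record above stops at `prettify()`, but its docstring targets title, description, H1 and keywords. One way those fields could be pulled with BeautifulSoup — a sketch, not the record's finished code, with the stdlib `html.parser` standing in for `lxml`:

```python
from bs4 import BeautifulSoup

html = ("<html><head><title>T</title>"
        "<meta name='keywords' content='a,b'></head>"
        "<body><h1>H</h1></body></html>")
soup = BeautifulSoup(html, "html.parser")
print(soup.title.string)                                         # T
print(soup.find("meta", attrs={"name": "keywords"})["content"])  # a,b
print(soup.h1.get_text())                                        # H
```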
"argument"}, {"api_name": "models.encoder.Encoder", "line_number": 35, "usage_type": "call"}, {"api_name": "config.DEVICE", "line_number": 38, "usage_type": "argument"}, {"api_name": "models.decoder.Decoder", "line_number": 37, "usage_type": "call"}, {"api_name": "config.DEVICE", "line_number": 39, "usage_type": "argument"}, {"api_name": "models.seq2seq.Seq2Seq", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "328444183", "text": "#\r\n# Vincent Charming (c) 2019\r\n#\r\n\"\"\"\r\nQuestion for SWE I Interview\r\nTask: Print out the first 20 numbers in the fibonacci series.\r\n\"\"\"\r\n\r\n__author__ = 'vcharming'\r\n\r\nimport argparse\r\nimport logging\r\nimport sys\r\n\r\n# Initialize logger config\r\nlogger = logging.getLogger(__name__)\r\nLOG_FORMAT = '[%(filename)s:%(lineno)s - %(levelname)-5s ] %(message)s'\r\nlogging.basicConfig(format=LOG_FORMAT)\r\nlogger.setLevel(logging.DEBUG)\r\n\r\n\r\ndef print_fibonacci(n):\r\n '''\r\n Prints the fibonacci sequence to n places\r\n :param n: positive integer\r\n :return:\r\n '''\r\n # Data Validation\r\n error_message = 'Parameter must be a positive integer.'\r\n if not isinstance(n, int):\r\n raise TypeError(error_message)\r\n if n <= 0:\r\n raise ValueError(error_message)\r\n \r\n current_num_behind_2, current_num_behind_1, current_num = 0, 1, 1\r\n for i in range(n):\r\n logger.info(current_num)\r\n current_num = current_num_behind_2 + current_num_behind_1\r\n current_num_behind_2 = current_num_behind_1\r\n current_num_behind_1 = current_num\r\n return\r\n\r\n\r\ndef init_parser():\r\n '''\r\n Initialize an argument parser for command line options\r\n\r\n :return An instance of argparse.ArgumentParser\r\n '''\r\n\r\n parser = argparse.ArgumentParser(description=\"Print the fibonacci series to the nth place\")\r\n\r\n parser.add_argument(\r\n \"num_of_places\",\r\n help=\"The number of places to print out the fibonacci sequence.\")\r\n\r\n return parser\r\n\r\n\r\ndef main(args):\r\n '''\r\n Print the fibonacci series to the nth place\r\n :param args: The parsed command line arguments\r\n '''\r\n\r\n print_fibonacci(int(args.num_of_places))\r\n\r\n sys.exit()\r\n\r\n\r\nif __name__ == '__main__':\r\n p = init_parser()\r\n sys.exit(main(p.parse_args()))", "sub_path": "python/tools/fibonacci.py", "file_name": "fibonacci.py", "file_ext": "py", "file_size_in_byte": 1791, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 19, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "115274258", "text": "from cocaine.burlak import burlak, config\nfrom cocaine.burlak.context import Context, LoggerSetup\nfrom cocaine.burlak.control_filter import ControlFilter\n\nimport pytest\n\nfrom tornado import queues\n\nfrom .common import ASYNC_TESTS_TIMEOUT\nfrom .common import make_future, make_logger_mock, make_mock_channel_with\n\n\nCF = ControlFilter\n\n\ntest_filters = [\n # control_filter, unicorn version\n (dict(apply_control=True, white_list=[]), 0),\n (dict(apply_control=False, white_list=[]), 1),\n (dict(apply_control=True, white_list=['a', 'b', 'c']), 2),\n (dict(apply_control=False, 
white_list=['c', 'b', 'a']), 3),\n]\n\n\nexcept_filters = [\n Exception('error', \"I'm broken 1\"),\n (ControlFilter.with_defaults(), 1),\n Exception('error', \"I'm broken 2\"),\n Exception('error', \"I'm broken 2\"),\n]\n\n\ndef eq(a, b):\n return a == b\n\n\ndef ne(a, b):\n return a != b\n\n\n@pytest.mark.parametrize(\n 'a,b,op',\n [\n (CF(True, ['a', 'b', 'c']), CF(True, ['a', 'b', 'c']), eq),\n (CF(True, ['a', 'b', 'c']), CF(True, ['a', 'b', 'z']), ne),\n (CF(True, ['a', 'b', 'c']), CF(False, ['a', 'b', 'c']), ne),\n (CF(True, ['a', 'b', 'c', 'k']), CF(True, ['a', 'b', 'c']), ne),\n ]\n)\ndef test_filter_eq(a, b, op):\n assert op(a, b)\n\n\n@pytest.fixture\ndef filter_listener(mocker):\n logger = make_logger_mock(mocker)\n filter_queue, input_queue = queues.Queue(), queues.Queue()\n\n cfg = config.Config(mocker.Mock())\n sentry_wrapper = mocker.Mock()\n\n context = Context(\n LoggerSetup(logger, False),\n cfg,\n '0',\n sentry_wrapper,\n mocker.Mock(),\n )\n\n unicorn = mocker.Mock()\n unicorn.subscribe = mocker.Mock()\n\n return burlak.ControlFilterListener(\n context, unicorn, filter_queue, input_queue)\n\n\n@pytest.mark.gen_test(timeout=ASYNC_TESTS_TIMEOUT)\ndef test_filter_listener(filter_listener, mocker):\n stop_side_effect = [True for _ in test_filters]\n stop_side_effect.append(False)\n\n mocker.patch.object(\n burlak.LoopSentry, 'should_run', side_effect=stop_side_effect)\n mocker.patch('tornado.gen.sleep', return_value=make_future(0))\n\n filter_listener.unicorn.subscribe = mocker.Mock(\n side_effect=[make_mock_channel_with(*test_filters)]\n )\n\n yield filter_listener.subscribe_to_control_filter()\n\n if test_filters:\n first_update = yield filter_listener.filter_queue.get()\n filter_listener.filter_queue.task_done()\n\n f, _ = test_filters[0]\n\n assert first_update.control_filter == ControlFilter.from_dict(f)\n\n\n@pytest.mark.gen_test(timeout=ASYNC_TESTS_TIMEOUT)\ndef test_filter_with_except(filter_listener, mocker):\n stop_side_effect = [True for _ in except_filters]\n stop_side_effect.append(False)\n\n mocker.patch.object(\n burlak.LoopSentry, 'should_run', side_effect=stop_side_effect)\n mocker.patch('tornado.gen.sleep', return_value=make_future(0))\n\n filter_listener.unicorn.subscribe = mocker.Mock(\n side_effect=[make_mock_channel_with(*except_filters)]\n )\n\n yield filter_listener.subscribe_to_control_filter()\n\n if except_filters:\n\n cfg = config.Config(mocker.Mock())\n\n first_update = yield filter_listener.filter_queue.get()\n filter_listener.filter_queue.task_done()\n\n assert first_update.control_filter == cfg.control_filter\n", "sub_path": "tests/test_control_filter.py", "file_name": "test_control_filter.py", "file_ext": "py", "file_size_in_byte": 3339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "cocaine.burlak.control_filter.ControlFilter", "line_number": 13, "usage_type": "name"}, {"api_name": "cocaine.burlak.control_filter.ControlFilter.with_defaults", "line_number": 27, "usage_type": "call"}, {"api_name": "cocaine.burlak.control_filter.ControlFilter", "line_number": 27, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 41, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 41, "usage_type": "attribute"}, {"api_name": "common.make_logger_mock", "line_number": 56, "usage_type": "call"}, {"api_name": "tornado.queues.Queue", "line_number": 57, "usage_type": "call"}, {"api_name": "tornado.queues", "line_number": 57, "usage_type": 
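`test_filter_eq` above passes the comparison operator itself as a parametrize argument. The same pattern reduced to a minimal standalone pytest file, with stdlib `operator` replacing the local `eq`/`ne` helpers:

```python
import operator

import pytest

@pytest.mark.parametrize("a,b,op", [
    (1, 1, operator.eq),
    (1, 2, operator.ne),
])
def test_compare(a, b, op):
    assert op(a, b)   # the operator is just another parameter value
```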
"name"}, {"api_name": "cocaine.burlak.config.Config", "line_number": 59, "usage_type": "call"}, {"api_name": "cocaine.burlak.config", "line_number": 59, "usage_type": "name"}, {"api_name": "cocaine.burlak.context.Context", "line_number": 62, "usage_type": "call"}, {"api_name": "cocaine.burlak.context.LoggerSetup", "line_number": 63, "usage_type": "call"}, {"api_name": "cocaine.burlak.burlak.ControlFilterListener", "line_number": 73, "usage_type": "call"}, {"api_name": "cocaine.burlak.burlak", "line_number": 73, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 54, "usage_type": "attribute"}, {"api_name": "cocaine.burlak.burlak.LoopSentry", "line_number": 83, "usage_type": "attribute"}, {"api_name": "cocaine.burlak.burlak", "line_number": 83, "usage_type": "name"}, {"api_name": "common.make_future", "line_number": 84, "usage_type": "call"}, {"api_name": "common.make_mock_channel_with", "line_number": 87, "usage_type": "call"}, {"api_name": "cocaine.burlak.control_filter.ControlFilter.from_dict", "line_number": 98, "usage_type": "call"}, {"api_name": "cocaine.burlak.control_filter.ControlFilter", "line_number": 98, "usage_type": "name"}, {"api_name": "pytest.mark.gen_test", "line_number": 77, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 77, "usage_type": "attribute"}, {"api_name": "common.ASYNC_TESTS_TIMEOUT", "line_number": 77, "usage_type": "name"}, {"api_name": "cocaine.burlak.burlak.LoopSentry", "line_number": 107, "usage_type": "attribute"}, {"api_name": "cocaine.burlak.burlak", "line_number": 107, "usage_type": "name"}, {"api_name": "common.make_future", "line_number": 108, "usage_type": "call"}, {"api_name": "common.make_mock_channel_with", "line_number": 111, "usage_type": "call"}, {"api_name": "cocaine.burlak.config.Config", "line_number": 118, "usage_type": "call"}, {"api_name": "cocaine.burlak.config", "line_number": 118, "usage_type": "name"}, {"api_name": "pytest.mark.gen_test", "line_number": 101, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 101, "usage_type": "attribute"}, {"api_name": "common.ASYNC_TESTS_TIMEOUT", "line_number": 101, "usage_type": "name"}]} +{"seq_id": "73602686", "text": "#!/usr/bin/python3\n'''\n Define class DatabaseStorage\n'''\nfrom sqlalchemy import create_engine, MetaData, desc\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom models.base_model import Base\nimport models\nfrom os import getenv\n\n\nclass DBStorage:\n '''Create SQLalchemy database'''\n __engine = None\n __session = None\n\n def __init__(self):\n '''Create engine and link to MySQL databse (dev_db) '''\n user = getenv(\"MYSQL_USER\")\n pwd = getenv(\"MYSQL_PWD\")\n host = getenv(\"MYSQL_HOST\")\n db = getenv(\"MYSQL_DB\")\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.format(\n user, pwd, host, db), pool_pre_ping=True)\n \n\n def new(self, obj):\n '''Add object to current database session'''\n self.__session.add(obj)\n\n\n def save(self):\n '''Commit all changes of current database session'''\n self.__session.commit()\n\n\n def reload(self):\n self.__session = Base.metadata.create_all(bind=self.__engine)\n self.__session = scoped_session(\n sessionmaker(\n bind=self.__engine,\n expire_on_commit=False))\n\n \n def close(self):\n '''closes a session with the current database'''\n self.__session.remove()\n\n\n def getNewest(self, cls, amount):\n '''Retreives newest objects based on class and amount'''\n if not cls or not amount:\n return None\n\n 
entity = models.classes.get(cls)\n newest_obj = self.__session.query(entity).order_by(desc(entity.updated_at)).limit(amount).all()\n return newest_obj\n\n def getBetweenDate(self, cls, date1, date2):\n '''Retereives objects between two dates'''\n if not cls or not date1 or not date2:\n return None\n\n entity = models.classes.get(cls)\n dates = self.__session.query(entity).filter(entity.updated_at.between(date1, date2))\n return dates\n\n def to_json(self, all_objs):\n if not all_objs:\n return None\n\n array = []\n for v in all_objs:\n new_dict = {}\n arr = [a for a in dir(v) if not a.startswith('_') and not callable(getattr(v,a)) and not a.startswith('meta')]\n for item in arr:\n new_dict[item] = getattr(v, item)\n array.append(new_dict)\n\n return {\"all items\": array}\n\n", "sub_path": "models/engine/db_storage.py", "file_name": "db_storage.py", "file_ext": "py", "file_size_in_byte": 2431, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.getenv", "line_number": 20, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 21, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 22, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 24, "usage_type": "call"}, {"api_name": "models.base_model.Base.metadata.create_all", "line_number": 39, "usage_type": "call"}, {"api_name": "models.base_model.Base.metadata", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.base_model.Base", "line_number": 39, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.scoped_session", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 41, "usage_type": "call"}, {"api_name": "models.classes.get", "line_number": 56, "usage_type": "call"}, {"api_name": "models.classes", "line_number": 56, "usage_type": "attribute"}, {"api_name": "sqlalchemy.desc", "line_number": 57, "usage_type": "call"}, {"api_name": "models.classes.get", "line_number": 65, "usage_type": "call"}, {"api_name": "models.classes", "line_number": 65, "usage_type": "attribute"}]} +{"seq_id": "183767543", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport shutil\nimport subprocess\nimport re\nimport string\n\nfrom bs4 import BeautifulSoup\nfrom jinja2 import Environment, PackageLoader\n\n\n# Set up vars\nenv = Environment(loader=PackageLoader('norman', 'templates'))\n\n\n# Utility Functions\n# -------------------------------\n\ndef template_factory(data, out_dir):\n def create_template(template_name):\n template = env.get_template(template_name)\n out = template.render(data=data)\n with open(\"%s/%s\" % (out_dir, template_name), \"w\") as f:\n f.write(out)\n return create_template\n\n\ndef create_dir(parent, name):\n new = os.path.join(parent, name)\n if not os.path.exists(new):\n os.makedirs(new)\n\n\ndef create_file(parent, name):\n new = os.path.join(parent, name)\n if not os.path.exists(new):\n open(new, 'w').close()\n\n\ndef find_root():\n working_dir = os.getcwd().split(os.sep)\n length = len(working_dir) + 1\n build_paths = filter(lambda x: x != '', ['/'.join(working_dir[:x]) for x in range(length)])\n paths = [x for x in reversed(build_paths)]\n for path in paths:\n test_root = os.path.join(path, '.quick')\n if os.path.isfile(test_root):\n return path\n return None\n\n\ndef create_style_tag(html, css):\n soup = BeautifulSoup(html, 'lxml')\n style = soup.find('style')\n 
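`getNewest` above is the standard order-by-descending-timestamp-plus-limit query. As a generic sketch, where `session` and `Entity` are placeholders for a configured session and a mapped class with an `updated_at` column:

```python
from sqlalchemy import desc

def newest(session, Entity, amount):
    # SELECT ... ORDER BY updated_at DESC LIMIT :amount
    return (session.query(Entity)
            .order_by(desc(Entity.updated_at))
            .limit(amount)
            .all())
```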
style.string.replace_with(css)\n return soup.prettify()\n\n\ndef replace_containers(html):\n soup = BeautifulSoup(html, 'lxml')\n wrappers = soup.find_all(\"div\", {\"class\": \"container\"})\n for container in wrappers:\n table = soup.new_tag('table')\n table.attrs = container.attrs\n table['border'] = 0\n table['cellpadding'] = 0\n table['cellspacing'] = 0\n\n td = soup.new_tag('td')\n td.contents = container.contents\n\n tr = soup.new_tag('tr')\n tr.append(td)\n\n table.append(tr)\n container.replace_with(table)\n return soup.prettify()\n\n\ndef replace_rows(html):\n soup = BeautifulSoup(html, 'lxml')\n rows = soup.find_all(\"div\", {\"class\": \"row\"})\n for container in rows:\n table = soup.new_tag('table')\n table.attrs = container.attrs\n table['border'] = 0\n table['cellpadding'] = 0\n table['cellspacing'] = 0\n\n tr = soup.new_tag('tr')\n tr.contents = container.contents\n\n table.append(tr)\n container.replace_with(table)\n\n return soup.prettify()\n\n\ndef replace_cols(html):\n soup = BeautifulSoup(html, 'lxml')\n cols = soup.find_all(\"div\", {\"class\": \"column\"})\n for item in cols:\n td = soup.new_tag('td')\n td.contents = item.contents\n td.attrs = item.attrs\n item.replace_with(td)\n\n return soup.prettify()\n\n\ndef strip_floats(html):\n return html.replace('float:left;', '').replace('float:left', '')\n\n\ndef strip_template_tags(html):\n tag_match = re.compile(r\"()\")\n tags = tag_match.findall(html)\n for tag in tags:\n tag_contents = re.findall(r\"(?<=\\>)[\\s\\S]*(?=\\<\\/custom\\>)\", tag)[0]\n html = html.replace(tag, tag_contents)\n return html\n\ndef strip_template_tags_content(html):\n # Make dirs for partials\n root_dir = find_root()\n partials_dir = os.path.join(root_dir, 'partials')\n if not os.path.exists(partials_dir):\n os.makedirs(partials_dir)\n else:\n shutil.rmtree(partials_dir)\n os.makedirs(partials_dir)\n\n # Process\n tag_match = re.compile(r\"()\")\n tags = tag_match.findall(html)\n for tag in tags:\n tag_contents = re.findall(r\"(?<=\\>)[\\s\\S]*(?=\\<\\/custom\\>)\", tag)[0]\n open_tag = re.findall(r\"(?<=)\", tag)[0]\n name = re.findall(r\"(?<=name=(?:\\\"|\\')).*?(?=(?:\\\"|\\'))\", open_tag)[0]\n fname = os.path.join(partials_dir, \"%s.html\" % name.lower().replace(\" \", \"-\"))\n with open(fname, 'w') as f:\n f.write(tag_contents.encode('ascii', 'xmlcharrefreplace'))\n replacement = tag.replace(tag_contents, '')\n html = html.replace(tag, replacement)\n return html\n\n\ndef apply_test_transformations(html):\n transformations = [\n replace_containers,\n replace_rows,\n replace_cols,\n strip_floats,\n strip_template_tags\n ]\n return reduce(lambda x, y: y(x), transformations, html).encode('utf8')\n\ndef apply_package_transformations(html):\n transformations = [\n replace_containers,\n replace_rows,\n replace_cols,\n strip_floats,\n strip_template_tags_content\n ]\n return reduce(lambda x, y: y(x), transformations, html).encode('utf8')\n", "sub_path": "norman/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 4619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "jinja2.Environment", "line_number": 14, "usage_type": "call"}, {"api_name": "jinja2.PackageLoader", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 31, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 42, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 54, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 61, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 82, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 101, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 117, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 129, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 131, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 132, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 135, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 138, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 139, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}]} +{"seq_id": "613635222", "text": "import World\nimport Camera\nimport CubeScreen\nimport pygame\nimport numpy\nimport time\n\nclass Game4D:\n\n MAX_FPS = 60.0\n # can make a circular queue to measure actual FPS\n BACKGROUND_COLOR = (256, 256, 256, 0)\n CUBESCREEN_SIZE = (5, 5, 5)\n SCREEN_SIZE = (200, 200)\n def __init__(self):\n self._resolution = self.SCREEN_SIZE\n self._3D_world = World.World(dimensions=3)\n self._3D_camera = Camera.Camera(self._resolution)\n self._4D_world = World.World(dimensions=4)\n self._4D_camera = Camera.Camera(self.CUBESCREEN_SIZE)\n self._screen = pygame.display.set_mode(self._resolution)\n\n # Add Cameras to their worlds.\n self._3D_world.addCamera(self._3D_camera, (0.0, 0.0, 0.0))\n self._4D_world.addCamera(self._4D_camera, (0.0, 0.0, 0.0, 0.0))\n # Place a screen displaying the 4D camera in the 3D world\n cube_screen = CubeScreen.CubeScreen(cam=self._4D_camera)\n self._3D_world.addObject(cube_screen, (0.0, 0.0, 1.0))\n\n def playGame(self):\n now = time.time()\n while True:\n\n lastframe = now\n inputs = {}\n #while now - (1.0/self.MAX_FPS) < lastframe:\n # inputs.update(self.getInputs())\n # now = time.time()\n inputs.update(self.getInputs())\n if '' in repr(inputs):\n exit(0)\n print(inputs)\n self.update(inputs)\n self.render()\n\n def getInputs(self):\n inputs = pygame.event.get()\n input_dict = 
{'inputs': inputs}\n return input_dict\n\n def update(self, inputs):\n self._4D_world.update(inputs)\n self._3D_world.update(inputs)\n\n def render(self):\n cam_array = self._3D_world.getCameraView(0)\n int_arr = cam_array.flatten()\n int_arr2 = numpy.array([(a, b, c) for a,b,c in int_arr])\n\n new_arr = numpy.reshape(int_arr2, self._resolution+(3,))\n pygame.pixelcopy.array_to_surface(self._screen, new_arr)\n\n\ndef main():\n game = Game4D()\n game.playGame()\n\n\nif __name__ == \"__main__\":\n main()", "sub_path": "4DGame/Game4D.py", "file_name": "Game4D.py", "file_ext": "py", "file_size_in_byte": 1856, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "World.World", "line_number": 17, "usage_type": "call"}, {"api_name": "Camera.Camera", "line_number": 18, "usage_type": "call"}, {"api_name": "World.World", "line_number": 19, "usage_type": "call"}, {"api_name": "Camera.Camera", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 21, "usage_type": "attribute"}, {"api_name": "CubeScreen.CubeScreen", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.pixelcopy.array_to_surface", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.pixelcopy", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "642060546", "text": "from django.conf.urls import url\nfrom django.urls import path\n\nfrom chanlun import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n path('chat//', views.room, name='room'),\n url(r'^dizzy$', views.dizzy, name='dizzy'),\n url(r'^ks', views.ks, name='ks'),\n url(r'^bars', views.get_bars, name='get_bars'),\n url(r'^securities', views.get_all_securities, name='get_all_securities'),\n]\n", "sub_path": "chanlun/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 427, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "chanlun.views.index", "line_number": 7, "usage_type": "attribute"}, {"api_name": "chanlun.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "chanlun.views.room", "line_number": 8, "usage_type": "attribute"}, {"api_name": "chanlun.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "chanlun.views.dizzy", "line_number": 9, "usage_type": "attribute"}, {"api_name": "chanlun.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "chanlun.views.ks", "line_number": 10, "usage_type": "attribute"}, {"api_name": "chanlun.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "chanlun.views.get_bars", "line_number": 11, "usage_type": "attribute"}, {"api_name": "chanlun.views", 
"line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "chanlun.views.get_all_securities", "line_number": 12, "usage_type": "attribute"}, {"api_name": "chanlun.views", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "268836025", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 28 11:15:50 2020\n\n@author: samuel\n\"\"\"\n\nimport DDQNAgent\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport YpacaraiMap\nfrom torch import save\n\nfrom tqdm import tqdm\n\n# Definimos los hiperparámetros #\n\nsteps = 300\nepochs = 1500\ngamma = 0.95\nepsilon = 0.99\nlr = 1e-3\nn_actions = 8\nmem_size = 10000\nbatch_size = 250\neps_min = 0.01\neps_dec = (epsilon-eps_min)/1400\nreplace = 50\ntimesteps = 5\ninput_dims = (timesteps,3,25,19)\n\n# Creamos el agente #\n\nagente = DDQNAgent.DDQNAgent(gamma, epsilon, lr, n_actions, input_dims, \n mem_size, batch_size, eps_min, eps_dec, replace)\n\n# Inicializamos el escenario #\n\nenv = YpacaraiMap.Environment()\n\n# Wrapper para seleccionar qué es el estado #\ndef do_step(env,action,ext_state):\n \n obs, reward, done, info = env.step(action)\n \n state = env.render()\n \n for t in range(timesteps-1):\n ext_state[t] = ext_state[t+1]\n \n ext_state[timesteps-1] = state\n \n return ext_state, reward, done, info\n \ndef reset(env):\n \n env.reset()\n \n state = env.render()\n \n ext_state = np.zeros((timesteps,3,25,19))\n \n for t in range(timesteps):\n \n ext_state[t] = state \n \n return ext_state\n\n# Semillas #\nnp.random.seed(42)\n\n# Creamos la figura #\n\nfig = plt.figure(figsize=(8, 4))\nfig.show()\nfig.canvas.draw()\nplt.xlim([0,epochs])\nplt.grid(True, which = 'both')\n\nfiltered_reward = 0\nfiltered_reward_buffer = []\nreward_buffer = []\n\nrecord = -100000\n\n# Comenzamos el entrenamiento #\n\nfor epoch in tqdm(range(0,epochs)):\n \n state = reset(env)\n rew_episode = 0\n\n # Mermamos epsilon#\n agente.decrement_epsilon()\n \n for step in range(steps):\n \n # Llamamos a la política de comportamiento #\n action = agente.choose_action_epsilon_greedy(state)\n \n # Aplicamos la acción escogida #\n next_state, reward, done, info = do_step(env,action,state)\n \n # Guardamos la experiencia #\n agente.store_transition(state,action,reward,next_state,done)\n \n # El estado anterior pasa a ser el actual #\n state = next_state\n \n # Acumulamos la recompensa total #\n rew_episode += reward\n \n # Entrenamos. 
If there are not enough experiences, learn() just returns #\n agente.learn()\n \n # Update the target network (if it is due) #\n agente.replace_target_network(epoch)\n\n if epoch == 0:\n filtered_reward = rew_episode\n else:\n filtered_reward = rew_episode*0.05 + filtered_reward*0.95\n \n reward_buffer.append(rew_episode)\n filtered_reward_buffer.append(filtered_reward)\n\n if(record < rew_episode):\n print('New record of {:06.2f} at episode {:d}\\n'.format(rew_episode,epoch))\n record = rew_episode\n save(agente.q_eval, \"DDQN_BEST.pt\")\n\n # Plot the reward #\n plt.plot(reward_buffer,'b',alpha=0.2)\n plt.plot(filtered_reward_buffer,'r')\n plt.pause(0.001)\n fig.canvas.draw()\n \n\nprint('Training finished!')\n\nsave(agente.q_eval, \"DDQN_LAST.pt\")\n \n \n\n", "sub_path": "Algoritmos/PyTorch_RL/PyTorch_DDQN_LSTM/DDQN.py", "file_name": "DDQN.py", "file_ext": "py", "file_size_in_byte": 3171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "DDQNAgent.DDQNAgent", "line_number": 35, "usage_type": "call"}, {"api_name": "YpacaraiMap.Environment", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 71, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "466981729", "text": "## This code was written in order to take a compressed zeroes N42 file and translate the channel data into an xlsx file with the correct number of channels\r\n## by A. 
Dantley\r\n## Completed date: 9/17/2019\r\n\r\n################################################\r\n### Breakdown\r\n################################################\r\n\r\n### input - N42 file with counted zero compression on channel data.\r\n\r\n### output - xlsx file with counted zero compression removed\r\n\r\n\r\n################################################\r\n### Selecting File to Import From\r\n################################################\r\n\r\nimport os.path\r\nfrom xml.dom import minidom\r\nimport xlsxwriter\r\nimport pandas as pd\r\nimport sys\r\n\r\nfile_path = input(\"Insert full path of file to decompress: \")\r\n\r\nif os.path.exists(file_path) == False:\r\n print(\"Please Check file name again.\")\r\n sys.exit()\r\nelse:\r\n \r\n path_split = file_path.split(\"/\")\r\n original_file_name = str(path_split[-1])\r\n original_file_name_split = original_file_name.split(\".\")\r\n new_filename = str(original_file_name_split[0]) + \"_translated.\" + str(original_file_name_split[1])\r\n\r\n ################################################\r\n ### Channel Data Import\r\n ################################################\r\n\r\n N42_parsed = minidom.parse(file_path)\r\n Channel_Data = N42_parsed.getElementsByTagName('ChannelData')\r\n spectra = []\r\n translated_workbook = xlsxwriter.Workbook(str(new_filename) + \".xlsx\")\r\n translated_worksheet = translated_workbook.add_worksheet()\r\n\r\n for elem in Channel_Data:\r\n spectra.append(elem.firstChild.data)\r\n\r\n ################################################\r\n ### Inserting Zeroes\r\n ################################################\r\n\r\n sample = elem.firstChild.data\r\n split = sample.split(\" \")\r\n long_output = \"\"\r\n \r\n\r\n\r\n zero_indices = [i for i,j in enumerate(split) if j ==\"0\"]\r\n next_up = [item + 1 for item in zero_indices]\r\n \r\n for index in next_up:\r\n zeroes = \"\"\r\n i = 0\r\n\r\n while i != int(split[index]): \r\n if i == (int(split[index]) - 1):\r\n zeroes = zeroes + \"0\"\r\n i= i+1\r\n\r\n else:\r\n zeroes = zeroes + \"0 \"\r\n i = i+1\r\n \r\n split[index] = zeroes\r\n \r\n for i in zero_indices:\r\n split.remove(\"0\")\r\n\r\n for i in split:\r\n long_output = long_output + i + \" \"\r\n\r\n ################################################\r\n ### Creating Rows in DataFrame\r\n ################################################\r\n\r\n list_formatted_spectra = long_output.split(\" \")\r\n \r\n################################################\r\n### Exporting DataFrame as a xlsx file\r\n################################################\r\n spectra_number = Channel_Data.index(elem) + 1\r\n j = Channel_Data.index(elem)\r\n i = 1\r\n\r\n translated_worksheet.write(\"A\" + str(spectra_number), str(\"Spectra \" + str(spectra_number))) \r\n while i < list_formatted_spectra.__len__():\r\n translated_worksheet.write(j, i, list_formatted_spectra[i-1])\r\n i = i +1\r\n \r\n translated_workbook.close()\r\n", "sub_path": "CountedZeroesN42Decompression/Counted_Zeroes_Decompression_v1 n42 to xlsx.py", "file_name": "Counted_Zeroes_Decompression_v1 n42 to xlsx.py", "file_ext": "py", "file_size_in_byte": 3329, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.path.exists", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 26, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 28, "usage_type": "call"}, 
{"api_name": "xml.dom.minidom.parse", "line_number": 40, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 40, "usage_type": "name"}, {"api_name": "xlsxwriter.Workbook", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "587219777", "text": "from helpers import MessageLogger\nfrom helpers import get_ext_ip\nfrom settings import nickname\n\nfrom twisted.words.protocols import irc\n\nimport time\n\n\nclass Bot(irc.IRCClient):\n \"\"\"\n Our own IRC BOT\n \"\"\"\n\n def __init__(self):\n self.nickname = nickname\n\n def connectionMade(self):\n \"\"\"\n At the initial connection we log the time\n \"\"\"\n irc.IRCClient.connectionMade(self)\n self.logger = MessageLogger(open(self.factory.filename, \"a\"))\n self.logger.log(\"[connected at %s]\" %\n time.asctime(time.localtime(time.time())))\n\n def connectionLost(self, reason):\n \"\"\"\n At closing we log that we disconnected\n \"\"\"\n irc.IRCClient.connectionLost(self, reason)\n self.logger.log(\"[disconnected at %s]\" %\n time.asctime(time.localtime(time.time())))\n self.logger.close()\n\n def signedOn(self):\n \"\"\"\n Called when bot is successfully signed on the server\n \"\"\"\n self.join(self.factory.channel)\n\n def joined(self, channel):\n \"\"\"\n Called when bot has joined a channel\n \"\"\"\n self.logger.log(\"[I have joined %s]\" % channel)\n\n def privmsg(self, user, channel, msg):\n \"\"\"\n This is called whenever bit receives a message.\n\n If channel == self.nickname it means that this is a private message\n \"\"\"\n\n user = user.split('!', 1)[0]\n\n if channel == self.nickname:\n self.logger.log(\"[private message]<%s> %s\" % (user, msg))\n msg = \"Wanna get private ah? Better not, after all i'm just a bot\"\n self.msg(user, msg)\n self.logger.log(\"<%s> %s\" % (self.nickname, msg))\n return\n\n self.logger.log(\"<%s> %s\" % (user, msg))\n\n if msg.startswith(self.nickname + \":\"):\n #msg = \"%s: I am a log bot\" % user\n msg = self.parse_command(user, msg)\n self.msg(channel, msg)\n self.logger.log(\"<%s> %s\" % (self.nickname, msg))\n\n def action(self, user, channel, msg):\n \"\"\"\n Whenever an action is performed from a user it gets logged in\n \"\"\"\n user = user.split('!', 1)[0]\n self.logger.log(\"* %s %s\" % (user, msg))\n\n def irc_NICK(self, prefix, params):\n \"\"\"\n Called when an IRC user changes their nickname.\n \"\"\"\n old_nick = prefix.split('!')[0]\n new_nick = params[0]\n self.logger.log(\"%s is now known as %s\" % (old_nick, new_nick))\n\n def parse_command(self, user, msg):\n if \"--help\" in msg:\n with open('.botmanpage') as f:\n answer = f.read()\n return answer\n elif \"--nasty\" in msg:\n return \"You nasty nasty dirty goat\"\n elif \"--myip\" in msg:\n ip = get_ext_ip()\n self.msg(user, ip)\n return \";-)\"\n else:\n answer = \"Hey, i am just a bot.\\n: --help\\nto see options\"\n return answer\n", "sub_path": "bot/Bot.py", "file_name": "Bot.py", "file_ext": "py", "file_size_in_byte": 3022, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "twisted.words.protocols.irc.IRCClient", "line_number": 10, "usage_type": "attribute"}, {"api_name": "twisted.words.protocols.irc", "line_number": 10, "usage_type": "name"}, {"api_name": "settings.nickname", "line_number": 16, "usage_type": "name"}, {"api_name": "twisted.words.protocols.irc.IRCClient.connectionMade", "line_number": 22, "usage_type": "call"}, {"api_name": "twisted.words.protocols.irc.IRCClient", "line_number": 22, "usage_type": "attribute"}, 
{"api_name": "twisted.words.protocols.irc", "line_number": 22, "usage_type": "name"}, {"api_name": "helpers.MessageLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "time.asctime", "line_number": 25, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 25, "usage_type": "call"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "twisted.words.protocols.irc.IRCClient.connectionLost", "line_number": 31, "usage_type": "call"}, {"api_name": "twisted.words.protocols.irc.IRCClient", "line_number": 31, "usage_type": "attribute"}, {"api_name": "twisted.words.protocols.irc", "line_number": 31, "usage_type": "name"}, {"api_name": "time.asctime", "line_number": 33, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 33, "usage_type": "call"}, {"api_name": "time.time", "line_number": 33, "usage_type": "call"}, {"api_name": "helpers.get_ext_ip", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "76435313", "text": "# -*- coding: utf-8 -*-\r\n\r\n# --------------------------------------------------------------------------\r\n# Copyright Commvault Systems, Inc.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# --------------------------------------------------------------------------\r\n\r\n\"\"\"File for performing domain related operations.\r\n\r\n\r\nDomains: Class for representing all the associated domains with the commcell.\r\n\r\n\r\nDomains:\r\n\r\n __init__(commcell_object) -- initialize instance of the Domains associated with\r\n the specified commcell\r\n\r\n __str__() -- returns all the domains associated with the commcell\r\n\r\n __repr__() -- returns the string for the instance of the Domains class\r\n\r\n __len__() -- returns the number of domains associated with the Commcell\r\n\r\n __getitem__() -- returns the name of the domain for the given domain Id\r\n or the details for the given domain name\r\n\r\n _get_domains() -- gets all the domains associated with the commcell specified\r\n\r\n all_domains() -- returns the dict of all the domanin configured\r\n\r\n has_domain() -- checks if a domain exists with the given name or not\r\n\r\n get(domain_name) -- returns the instance of the Domain class,\r\n for the the input domain name\r\n\r\n delete(domain_name) -- deletes the domain from the commcell\r\n\r\n refresh() -- refresh the domains associated with the commcell\r\n\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import unicode_literals\r\n\r\nfrom base64 import b64encode\r\nfrom past.builtins import basestring\r\n\r\nfrom .exception import SDKException\r\n\r\n\r\nclass Domains(object):\r\n \"\"\"Class for getting all the domains associated with a commcell.\"\"\"\r\n\r\n def __init__(self, commcell_object):\r\n \"\"\"Initialize object of the Domains class.\r\n\r\n Args:\r\n commcell_object (object) -- instance of the Commcell class\r\n\r\n Returns:\r\n object - instance of the Domains class\r\n\r\n \"\"\"\r\n self._commcell_object = 
commcell_object\r\n\r\n self._cvpysdk_object = commcell_object._cvpysdk_object\r\n self._services = commcell_object._services\r\n self._update_response_ = commcell_object._update_response_\r\n\r\n self._DOMAIN_CONTROLER = self._services['DOMAIN_CONTROLER']\r\n\r\n self._domains = None\r\n self.refresh()\r\n\r\n def __str__(self):\r\n \"\"\"Representation string consisting of all domains of the Commcell.\r\n\r\n Returns:\r\n str - string of all the domains for a commcell\r\n\r\n \"\"\"\r\n representation_string = \"{:^5}\\t{:^50}\\n\\n\".format('S. No.', 'Domain')\r\n\r\n for index, domain_name in enumerate(self._domains):\r\n sub_str = '{:^5}\\t{:30}\\n'.format(index + 1, domain_name)\r\n representation_string += sub_str\r\n\r\n return representation_string.strip()\r\n\r\n def __repr__(self):\r\n \"\"\"Representation string for the instance of the Domains class.\"\"\"\r\n return \"Domains class instance for Commcell: '{0}'\".format(\r\n self._commcell_object.commserv_name\r\n )\r\n\r\n def __len__(self):\r\n \"\"\"Returns the number of the domains associated with the Commcell.\"\"\"\r\n return len(self.all_domains)\r\n\r\n def __getitem__(self, value):\r\n \"\"\"Returns the name of the domain for the given domain ID or\r\n the details of the domain for given domain Name.\r\n\r\n Args:\r\n value (str / int) -- Name or ID of the domain\r\n\r\n Returns:\r\n str - name of the domain, if the domain id was given\r\n\r\n dict - dict of details of the domain, if domain name was given\r\n\r\n Raises:\r\n IndexError:\r\n no domain exists with the given Name / Id\r\n\r\n \"\"\"\r\n value = str(value)\r\n\r\n if value in self.all_domains:\r\n return self.all_domains[value]\r\n else:\r\n try:\r\n return list(filter(lambda x: x[1]['id'] == value, self.all_domains.items()))[0][0]\r\n except IndexError:\r\n raise IndexError('No domain exists with the given Name / Id')\r\n\r\n def _get_domains(self):\r\n \"\"\"Gets all the domains associated with the commcell\r\n\r\n Returns:\r\n dict - consists of all domains in the commcell\r\n\r\n {\r\n \"domain1_name\": domain_Details_dict1,\r\n\r\n \"domain2_name\": domain_Details_dict2\r\n }\r\n\r\n Raises:\r\n SDKException:\r\n if response is empty\r\n\r\n if response is not success\r\n\r\n \"\"\"\r\n flag, response = self._cvpysdk_object.make_request('GET', self._DOMAIN_CONTROLER)\r\n\r\n if flag:\r\n domains_dict = {}\r\n\r\n if response.json() and 'providers' in response.json():\r\n response_value = response.json()['providers']\r\n\r\n for temp in response_value:\r\n temp_name = temp['shortName']['domainName'].lower()\r\n temp_details = temp\r\n domains_dict[temp_name] = temp_details\r\n\r\n return domains_dict\r\n else:\r\n response_string = self._update_response_(response.text)\r\n raise SDKException('Response', '101', response_string)\r\n\r\n @property\r\n def all_domains(self):\r\n \"\"\"Returns the domains configured on this commcell\r\n\r\n dict - consists of all domains in the commcell\r\n\r\n {\r\n \"domain1_name\": domain_Details_dict1,\r\n\r\n \"domain2_name\": domain_Details_dict2\r\n }\r\n \"\"\"\r\n return self._domains\r\n\r\n def has_domain(self, domain_name):\r\n \"\"\"Checks if a domain exists in the commcell with the input domain name.\r\n\r\n Args:\r\n domain_name (str) -- name of the domain\r\n\r\n Returns:\r\n bool - boolean output whether the domain exists in the commcell or not\r\n\r\n Raises:\r\n SDKException:\r\n if type of the domain name argument is not string\r\n\r\n \"\"\"\r\n if not isinstance(domain_name, basestring):\r\n raise 
SDKException('Domain', '101')\r\n\r\n return self._domains and domain_name.lower() in self._domains\r\n\r\n def get(self, domain_name):\r\n \"\"\"Returns a domain object of the specified domain name.\r\n\r\n Args:\r\n domain_name (str) -- name of the domain\r\n\r\n Returns:\r\n dict - properties of domain.\r\n\r\n Raises:\r\n SDKException:\r\n if type of the domain name argument is not string\r\n\r\n if no domain exists with the given name\r\n\r\n \"\"\"\r\n if not isinstance(domain_name, basestring):\r\n raise SDKException('Domain', '101')\r\n else:\r\n domain_name = domain_name.lower()\r\n\r\n if self.has_domain(domain_name):\r\n return self._domains[domain_name]\r\n else:\r\n raise SDKException(\r\n 'Domain', '102', 'No domain exists with name: {0}'.format(domain_name)\r\n )\r\n\r\n def delete(self, domain_name):\r\n \"\"\"Deletes the domain from the commcell.\r\n\r\n Args:\r\n domain_name (str) -- name of the domain to remove from the commcell\r\n\r\n Raises:\r\n SDKException:\r\n if type of the domain name argument is not string\r\n\r\n if failed to delete domain\r\n\r\n if response is empty\r\n\r\n if response is not success\r\n\r\n if no domain exists with the given name\r\n\r\n \"\"\"\r\n\r\n if not isinstance(domain_name, basestring):\r\n raise SDKException('Domain', '101')\r\n else:\r\n domain_name = domain_name.lower()\r\n\r\n if self.has_domain(domain_name):\r\n domain_id = str(self._domains[domain_name][\"shortName\"][\"id\"])\r\n delete_domain = self._services['DELETE_DOMAIN_CONTROLER'] % (domain_id)\r\n\r\n flag, response = self._cvpysdk_object.make_request('DELETE', delete_domain)\r\n\r\n if flag:\r\n if response.json() and 'errorCode' in response.json():\r\n error_code = response.json()[\"errorCode\"]\r\n\r\n if error_code == 0:\r\n # initialize the domain again\r\n # so the domains object has all the domains\r\n self.refresh()\r\n else:\r\n o_str = ('Failed to delete domain with error code: \"{0}\"'\r\n '\\nPlease check the documentation for '\r\n 'more details on the error')\r\n raise SDKException(\r\n 'Domain', '102', o_str.format(error_code)\r\n )\r\n else:\r\n raise SDKException('Response', '102')\r\n else:\r\n response_string = self._update_response_(response.text)\r\n raise SDKException('Response', '101', response_string)\r\n else:\r\n raise SDKException(\r\n 'Domain', '102', 'No domain exists with name: {0}'.format(domain_name)\r\n )\r\n\r\n def refresh(self):\r\n \"\"\"Refresh the domains associated with the Commcell.\"\"\"\r\n self._domains = self._get_domains()\r\n\r\n def add(self,\r\n domain_name,\r\n netbios_name,\r\n user_name,\r\n password,\r\n company_id=\"\",\r\n ad_proxy_list=None,\r\n enable_sso=True):\r\n \"\"\"Adds a new domain to the commcell.\r\n\r\n Args:\r\n domain_name (str) -- name of the domain\r\n\r\n netbios_name (str) -- netbios name of the domain\r\n\r\n user_name (str) -- user name of the domain\r\n\r\n password (str) -- password of the domain\r\n\r\n company_id (int) -- company id for which the domain needs to be added for\r\n\r\n adProxyList (list) -- list of client objects to be used as proxy.\r\n\r\n default: None\r\n\r\n if no proxy required\r\n\r\n enable_sso (bool) -- enable sso for domain\r\n\r\n Returns:\r\n dict - properties of domain\r\n\r\n Raises:\r\n SDKException:\r\n if type of the domain name argument is not string\r\n\r\n if no domain exists with the given name\r\n\r\n \"\"\"\r\n if not (isinstance(domain_name, basestring) and\r\n isinstance(netbios_name, basestring) and\r\n isinstance(user_name, basestring) and\r\n 
isinstance(password, basestring)):\r\n raise SDKException('Domain', '101')\r\n else:\r\n domain_name = domain_name.lower()\r\n\r\n if self.has_domain(domain_name):\r\n return self._domains[domain_name]\r\n\r\n proxy_information = {}\r\n\r\n if ad_proxy_list:\r\n if isinstance(ad_proxy_list, list):\r\n proxy_information = {\r\n 'adProxyList': [{\"clientName\": client} for client in ad_proxy_list]\r\n }\r\n else:\r\n raise SDKException('Domain', '101')\r\n\r\n domain_create_request = {\r\n \"operation\": 1,\r\n \"provider\": {\r\n \"serviceType\": 2,\r\n \"flags\": 1,\r\n \"bPassword\": b64encode(password.encode()).decode(),\r\n \"login\": user_name,\r\n \"enabled\": 1,\r\n \"useSecureLdap\": 0,\r\n \"connectName\": domain_name,\r\n \"bLogin\": user_name,\r\n \"ownerCompanyId\": company_id,\r\n \"tppm\": {\r\n \"enable\": True if ad_proxy_list else False,\r\n \"tppmType\": 4,\r\n \"proxyInformation\": proxy_information\r\n },\r\n \"shortName\": {\r\n \"domainName\": netbios_name\r\n }\r\n }\r\n }\r\n\r\n flag, response = self._cvpysdk_object.make_request(\r\n 'POST', self._DOMAIN_CONTROLER, domain_create_request\r\n )\r\n\r\n if flag:\r\n if response.json() and 'errorCode' in response.json():\r\n error_code = response.json()[\"errorCode\"]\r\n\r\n if error_code == 0:\r\n # initialize the domain again\r\n # so the domains object has all the domains\r\n self.refresh()\r\n else:\r\n o_str = ('Failed to add domain with error code: \"{0}\"'\r\n '\\nPlease check the documentation for '\r\n 'more details on the error')\r\n raise SDKException(\r\n 'Domain', '102', o_str.format(error_code)\r\n )\r\n else:\r\n raise SDKException('Response', '102')\r\n else:\r\n response_string = self._update_response_(response.text)\r\n raise SDKException('Response', '101', response_string)\r\n", "sub_path": "cvpysdk/domains.py", "file_name": "domains.py", "file_ext": "py", "file_size_in_byte": 14128, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "exception.SDKException", "line_number": 175, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 205, "usage_type": "argument"}, {"api_name": "exception.SDKException", "line_number": 206, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 226, "usage_type": "argument"}, {"api_name": "exception.SDKException", "line_number": 227, "usage_type": "call"}, {"api_name": "exception.SDKException", "line_number": 234, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 258, "usage_type": "argument"}, {"api_name": "exception.SDKException", "line_number": 259, "usage_type": "call"}, {"api_name": "exception.SDKException", "line_number": 281, "usage_type": "call"}, {"api_name": "exception.SDKException", "line_number": 285, "usage_type": "call"}, {"api_name": "exception.SDKException", "line_number": 288, "usage_type": "call"}, {"api_name": "exception.SDKException", "line_number": 290, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 337, "usage_type": "argument"}, {"api_name": "past.builtins.basestring", "line_number": 338, "usage_type": "argument"}, {"api_name": "past.builtins.basestring", "line_number": 339, "usage_type": "argument"}, {"api_name": "past.builtins.basestring", "line_number": 340, "usage_type": "argument"}, {"api_name": "exception.SDKException", "line_number": 341, "usage_type": "call"}, {"api_name": "exception.SDKException", "line_number": 356, "usage_type": "call"}, {"api_name": 
"base64.b64encode", "line_number": 363, "usage_type": "call"}, {"api_name": "exception.SDKException", "line_number": 397, "usage_type": "call"}, {"api_name": "exception.SDKException", "line_number": 401, "usage_type": "call"}, {"api_name": "exception.SDKException", "line_number": 404, "usage_type": "call"}]} +{"seq_id": "393310390", "text": "#!/usr/bin/env python3\nimport sys\nimport numpy as np\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam\nfrom keras.utils import plot_model\n\nclass GAN(object):\n def __init__(self,discriminator,generator):\n self.OPTIMIZER = Adam(lr=0.0002, decay=8e-9)\n \n self.Generator = generator\n\n self.Discriminator = discriminator\n self.Discriminator.trainable = False\n \n self.gan_model = self.model()\n self.gan_model.compile(loss='binary_crossentropy', optimizer=self.OPTIMIZER)\n self.save_model()\n self.summary()\n\n def model(self):\n model = Sequential()\n model.add(self.Generator)\n model.add(self.Discriminator)\n return model\n\n def summary(self):\n return self.gan_model.summary()\n\n def save_model(self):\n plot_model(self.gan_model.model, to_file='/data/GAN_Model.png')\n", "sub_path": "Chapter3/full-gan/gan.py", "file_name": "gan.py", "file_ext": "py", "file_size_in_byte": 907, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "keras.optimizers.Adam", "line_number": 10, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.utils.plot_model", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "223756753", "text": "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# coding=utf-8\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport hashlib\nimport requests\nimport tempfile\nimport tarfile\n\n__all__ = ['MODULE_HOME', 'download', 'md5file', 'download_and_uncompress']\n\n# TODO(ZeyuChen) add environment varialble to set MODULE_HOME\nMODULE_HOME = os.path.expanduser('~/.cache/paddle/module')\n\n\n# When running unit tests, there could be multiple processes that\n# trying to create MODULE_HOME directory simultaneously, so we cannot\n# use a if condition to check for the existence of the directory;\n# instead, we use the filesystem as the synchronization mechanism by\n# catching returned errors.\ndef must_mkdirs(path):\n try:\n os.makedirs(MODULE_HOME)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n pass\n\n\ndef md5file(fname):\n hash_md5 = hashlib.md5()\n f = open(fname, \"rb\")\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n f.close()\n return hash_md5.hexdigest()\n\n\ndef download_and_uncompress(url, save_name=None):\n module_name = url.split(\"/\")[-2]\n dirname = os.path.join(MODULE_HOME, module_name)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n #TODO(ZeyuChen) 
add download md5 file to verify file completeness\n file_name = os.path.join(\n dirname,\n url.split('/')[-1] if save_name is None else save_name)\n\n retry = 0\n retry_limit = 3\n while not (os.path.exists(file_name)):\n if os.path.exists(file_name):\n print(\"file md5\", md5file(file_name))\n if retry < retry_limit:\n retry += 1\n else:\n raise RuntimeError(\n \"Cannot download {0} within retry limit {1}\".format(\n url, retry_limit))\n print(\"Cache file %s not found, downloading %s\" % (file_name, url))\n r = requests.get(url, stream=True)\n total_length = r.headers.get('content-length')\n\n if total_length is None:\n with open(file_name, 'wb') as f:\n shutil.copyfileobj(r.raw, f)\n else:\n #TODO(ZeyuChen) upgrade to tqdm process\n with open(file_name, 'wb') as f:\n dl = 0\n total_length = int(total_length)\n for data in r.iter_content(chunk_size=4096):\n dl += len(data)\n f.write(data)\n done = int(50 * dl / total_length)\n sys.stdout.write(\n \"\\r[%s%s]\" % ('=' * done, ' ' * (50 - done)))\n sys.stdout.flush()\n\n print(\"file download completed!\", file_name)\n #TODO(ZeyuChen) add md5 check error and file incompleted error, then raise\n # them and catch them\n with tarfile.open(file_name, \"r:gz\") as tar:\n file_names = tar.getnames()\n print(file_names)\n module_dir = os.path.join(dirname, file_names[0])\n for file_name in file_names:\n tar.extract(file_name, dirname)\n\n return module_name, module_dir\n\n\nif __name__ == \"__main__\":\n # TODO(ZeyuChen) add unit test\n link = \"http://paddlehub.bj.bcebos.com/word2vec/word2vec-dim16-simple-example-1.tar.gz\"\n\n module_path = download_and_uncompress(link)\n print(\"module path\", module_path)\n", "sub_path": "paddle_hub/downloader.py", "file_name": "downloader.py", "file_ext": "py", "file_size_in_byte": 3946, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.expanduser", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 40, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 96, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tarfile.open", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": 
"attribute"}]} +{"seq_id": "501799816", "text": "# -*- coding: utf-8 -*-\n\nfrom iprofile import texts\nfrom iprofile.core.decorators import icommand\nfrom iprofile.core.models import ICommand\nimport click\n\n\n@icommand(help=texts.HELP_LIST, short_help=texts.HELP_LIST)\n@click.option('--names-only', is_flag=True, help=texts.HELP_NAMES_ONLY)\nclass List(ICommand):\n\n def run(self, **options):\n profiles_path = self.settings.get('path')\n profiles_list = self.list_profiles(profiles_path)\n\n if not profiles_list:\n self.red(texts.ERROR_NO_PROFILES_TO_LIST)\n return\n\n active = self.settings.get('active')\n qtt_profiles = len(profiles_list)\n qtt_text = texts.LOG_QTT_PROFILES.format(\n qtt_profiles,\n 's' if qtt_profiles > 1 else '',\n 'were' if qtt_profiles > 1 else 'was'\n )\n\n if not options.get('names_only'):\n self.green(qtt_text)\n\n for profile in profiles_list:\n if active and profile and ':' in active and ':' in profile:\n name, project = profile.split(':')\n active_name, active_project = active.split(':')\n if active_name == name and active_project == project:\n self.pgreen(profile)\n else:\n click.echo(profile)\n elif active == profile:\n self.pgreen(profile)\n else:\n click.echo(profile)\n", "sub_path": "iprofile/cli/list.py", "file_name": "list.py", "file_ext": "py", "file_size_in_byte": 1423, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "iprofile.core.models.ICommand", "line_number": 11, "usage_type": "name"}, {"api_name": "iprofile.texts.ERROR_NO_PROFILES_TO_LIST", "line_number": 18, "usage_type": "attribute"}, {"api_name": "iprofile.texts", "line_number": 18, "usage_type": "name"}, {"api_name": "iprofile.texts.LOG_QTT_PROFILES.format", "line_number": 23, "usage_type": "call"}, {"api_name": "iprofile.texts.LOG_QTT_PROFILES", "line_number": 23, "usage_type": "attribute"}, {"api_name": "iprofile.texts", "line_number": 23, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 39, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 43, "usage_type": "call"}, {"api_name": "iprofile.core.decorators.icommand", "line_number": 9, "usage_type": "call"}, {"api_name": "iprofile.texts.HELP_LIST", "line_number": 9, "usage_type": "attribute"}, {"api_name": "iprofile.texts", "line_number": 9, "usage_type": "name"}, {"api_name": "click.option", "line_number": 10, "usage_type": "call"}, {"api_name": "iprofile.texts.HELP_NAMES_ONLY", "line_number": 10, "usage_type": "attribute"}, {"api_name": "iprofile.texts", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "20679414", "text": "import unittest\nimport points_api\nimport xmlrunner\nimport os\nimport inspect\n\nfrom point_manager import PointManager\nfrom sqlalchemy import create_engine\nfrom base import Base\n\n#sean\nclass TestPointApi(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tengine = create_engine('sqlite:///test_points.sqlite')\n\t\t# Creates all the tables\n\t\tBase.metadata.create_all(engine)\n\t\tBase.metadata.bind = engine\n\t\tpoints_api.point_mgr = PointManager('test_points.sqlite')\n\t\tpoints_api.app.testing = True\n\t\tself.app = points_api.app.test_client()\n\t\tself.logPoint()\n\t\n\tdef tearDown(self):\n\t\tos.remove('test_points.sqlite')\n\t\tself.logPoint()\n\t\n\tdef logPoint(self):\n\t\tcurrentTest = self.id().split('.')[-1]\n\t\tcallingFunction = inspect.stack()[1][3]\n\t\tprint('in %s - %s()' % (currentTest, callingFunction))\n\t\n\tdef test_points_all(self):\n\t\trv = 
self.app.get('/points/all')\n\t\tself.assertEqual(rv.status, '200 OK')\n\n\tdef test_post_get_point(self):\n\t\trv_post = self.app.post('/points', json={ \"x\":5, \"y\":5}, headers={\"content-type\":\"application/json\"})\n\t\tself.assertEqual(rv_post.status, '200 OK')\n\t\trv_get = self.app.get('/points/all')\n\t\tself.assertEqual(rv_get.status, '200 OK')\n\n\nif __name__ == \"__main__\":\n\trunner = xmlrunner.XMLTestRunner(output='api-test-reports')\n\tunittest.main(testRunner=runner)\n\tunittest.main()\n", "sub_path": "gitlabCE/pointpipeline-main/test_points_api.py", "file_name": "test_points_api.py", "file_ext": "py", "file_size_in_byte": 1291, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "unittest.TestCase", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 15, "usage_type": "call"}, {"api_name": "base.Base.metadata.create_all", "line_number": 17, "usage_type": "call"}, {"api_name": "base.Base.metadata", "line_number": 17, "usage_type": "attribute"}, {"api_name": "base.Base", "line_number": 17, "usage_type": "name"}, {"api_name": "base.Base.metadata", "line_number": 18, "usage_type": "attribute"}, {"api_name": "base.Base", "line_number": 18, "usage_type": "name"}, {"api_name": "points_api.point_mgr", "line_number": 19, "usage_type": "attribute"}, {"api_name": "point_manager.PointManager", "line_number": 19, "usage_type": "call"}, {"api_name": "points_api.app", "line_number": 20, "usage_type": "attribute"}, {"api_name": "points_api.app.test_client", "line_number": 21, "usage_type": "call"}, {"api_name": "points_api.app", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 25, "usage_type": "call"}, {"api_name": "inspect.stack", "line_number": 30, "usage_type": "call"}, {"api_name": "xmlrunner.XMLTestRunner", "line_number": 45, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 46, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "117312602", "text": "\nimport numpy as np\nfrom neuron import h\n\n__all__ = ['Spine']\n\n\ndef point_at_distance(origin, dst, m, q):\n v1 = origin\n v2 = np.array([v1[0]+1, m*(v1[0]+1) + q])\n u = (v2 - v1) / np.linalg.norm(v2 - v1)\n v3 = v1 + dst * u\n return v3\n\n\ndef make_spine_coords(points, lengths):\n n_points = int((points.shape[0] - 1) / 2)\n n_dims = points.shape[1]\n center = points[n_points,:]\n if np.all(points[:,0] == points[0,0]):\n m = 0\n elif np.all(points[:,1] == points[0,1]):\n # the dendrite is parallel to the x-axis\n n_points = len(lengths)\n spine_points = np.tile(center, [n_points,1])\n for i in range(n_points):\n spine_points[i,-1] = lengths[i]\n return spine_points\n else:\n p = np.polyfit(points[:,0], points[:,1], 1)\n m = -1 / p[0]\n q = center[1] - m * center[0]\n n_points = len(lengths)\n spine_points = np.zeros((n_points, n_dims))\n for i in range(n_points):\n spine_points[i,:2] = point_at_distance(center[:2], lengths[i], m, q)\n if n_dims == 3:\n spine_points[:,2] = center[2]\n return spine_points\n\n\nclass Spine (object):\n def __init__(self, sec, x, head_L, head_diam, neck_L, neck_diam=None, Ra=None, spine_id=None):\n n_points = sec.n3d()\n coords = np.zeros((n_points, 3))\n diams = np.zeros(n_points)\n norm_arclength = np.zeros(n_points)\n for i in range(n_points):\n coords[i,:] = np.array([sec.x3d(i),\\\n sec.y3d(i),\\\n sec.z3d(i)])\n diams[i] = sec.diam3d(i)\n norm_arclength[i] = 
sec.arc3d(i) / sec.L\n idx = np.argmin(np.abs(norm_arclength - x))\n N = 3\n start = np.max([idx-N, 0])\n stop = np.min([idx+N+1, coords.shape[0]])\n points = coords[start : stop, :]\n lengths = diams[idx] / 2 + np.array([0, neck_L, neck_L, head_L+neck_L])\n self._points = make_spine_coords(points, lengths)\n if neck_diam is None:\n neck_diam = diams[idx]\n self._diams = np.array([neck_diam, neck_diam, head_diam, head_diam])\n self._sec = sec\n self._sec_x = norm_arclength[idx]\n\n if Ra is not None:\n self._Ra = Ra\n else:\n self._Ra = self._sec.Ra\n\n if spine_id is not None:\n self._id = '-{}'.format(spine_id)\n else:\n self._id = ''\n\n def instantiate(self):\n self.neck = h.Section(name = 'neck' + self._id)\n self.head = h.Section(name = 'head' + self._id)\n self.neck.nseg = 1\n self.head.nseg = 1\n self.geometry()\n self.connect()\n self.biophysics()\n\n def geometry(self):\n # spine neck\n xvec = h.Vector(self._points[:2,0])\n yvec = h.Vector(self._points[:2,1])\n zvec = h.Vector(self._points[:2,2])\n dvec = h.Vector(self._diams[:2])\n h.pt3dadd(xvec, yvec, zvec, dvec, sec=self.neck)\n # spine head\n xvec = h.Vector(self._points[2:4,0])\n yvec = h.Vector(self._points[2:4,1])\n zvec = h.Vector(self._points[2:4,2])\n dvec = h.Vector(self._diams[2:4])\n h.pt3dadd(xvec, yvec, zvec, dvec, sec=self.head)\n\n def connect(self):\n self.head.connect(self.neck)\n self.neck.connect(self._sec(self._sec_x))\n\n def biophysics(self):\n for sec in (self.neck, self.head):\n sec.cm = self._sec.cm\n sec.Ra = self._Ra\n sec.insert('pas')\n sec.g_pas = self._sec(self._sec_x).g_pas\n sec.e_pas = self._sec(self._sec_x).e_pas\n", "sub_path": "dlutils/spine.py", "file_name": "spine.py", "file_ext": "py", "file_size_in_byte": 3606, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.all", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "neuron.h.Section", "line_number": 78, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 78, "usage_type": "name"}, {"api_name": "neuron.h.Section", "line_number": 79, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 79, "usage_type": "name"}, {"api_name": "neuron.h.Vector", "line_number": 88, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 88, "usage_type": "name"}, 
{"api_name": "neuron.h.Vector", "line_number": 89, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 89, "usage_type": "name"}, {"api_name": "neuron.h.Vector", "line_number": 90, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 90, "usage_type": "name"}, {"api_name": "neuron.h.Vector", "line_number": 91, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 91, "usage_type": "name"}, {"api_name": "neuron.h.pt3dadd", "line_number": 92, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 92, "usage_type": "name"}, {"api_name": "neuron.h.Vector", "line_number": 94, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 94, "usage_type": "name"}, {"api_name": "neuron.h.Vector", "line_number": 95, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 95, "usage_type": "name"}, {"api_name": "neuron.h.Vector", "line_number": 96, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 96, "usage_type": "name"}, {"api_name": "neuron.h.Vector", "line_number": 97, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 97, "usage_type": "name"}, {"api_name": "neuron.h.pt3dadd", "line_number": 98, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "553749270", "text": "\n\n'''\n.. module:: skrf.plotting\n========================================\nplotting (:mod:`skrf.plotting`)\n========================================\n\n\nThis module provides general plotting functions.\n\nPlots and Charts\n------------------\n\n.. autosummary::\n :toctree: generated/\n\n smith\n plot_smith\n plot_rectangular\n plot_polar\n plot_complex_rectangular \n plot_complex_polar\n \nMisc Functions\n-----------------\n\n.. autosummary::\n :toctree: generated/\n \n save_all_figs\n add_markers_to_lines\n legend_off\n func_on_all_figs\n scrape_legend\n\n'''\nimport pylab as plb\nimport numpy as npy\nfrom matplotlib.patches import Circle # for drawing smith chart\nfrom matplotlib.pyplot import quiver\nfrom matplotlib import rcParams\n#from matplotlib.lines import Line2D # for drawing smith chart\n\n\n\ndef smith(smithR=1, chart_type = 'z', draw_labels = False, border=False,\n ax=None, ref_imm = 1.0, draw_vswr=None):\n '''\n plots the smith chart of a given radius\n\n Parameters\n -----------\n smithR : number\n radius of smith chart\n chart_type : ['z','y','zy', 'yz']\n Contour type. Possible values are\n * *'z'* : lines of constant impedance\n * *'y'* : lines of constant admittance\n * *'zy'* : lines of constant impedance stronger than admittance\n * *'yz'* : lines of constant admittance stronger than impedance\n draw_labels : Boolean\n annotate real and imaginary parts of impedance on the \n chart (only if smithR=1)\n border : Boolean\n draw a rectangular border with axis ticks, around the perimeter \n of the figure. Not used if draw_labels = True\n \n ax : matplotlib.axes object\n existing axes to draw smith chart on\n \n ref_imm : number\n Reference immittance for center of Smith chart. Only changes\n labels, if printed.\n\n draw_vswr : list of numbers, Boolean or None\n draw VSWR circles. 
If True, default values are used.\n\n '''\n ##TODO: fix this function so it doesn't suck\n if ax is None:\n ax1 = plb.gca()\n else:\n ax1 = ax\n\n # contour holds matplotlib instances of: patches.Circle, and lines.Line2D, which\n # are the contours on the smith chart\n contour = []\n\n # these are hard-coded on purpose, as they should always be present\n rHeavyList = [0,1]\n xHeavyList = [1,-1]\n\n #TODO: fix this\n # these could be dynamically coded in the future, but work well enough for now\n if not draw_labels:\n rLightList = plb.logspace(3,-5,9,base=.5)\n xLightList = plb.hstack([plb.logspace(2,-5,8,base=.5), -1*plb.logspace(2,-5,8,base=.5)])\n else:\n rLightList = plb.array( [ 0.2, 0.5, 1.0, 2.0, 5.0 ] )\n xLightList = plb.array( [ 0.2, 0.5, 1.0, 2.0 , 5.0, -0.2, -0.5, -1.0, -2.0, -5.0 ] )\n\n # vswr lines\n if isinstance(draw_vswr, (tuple,list)):\n vswrVeryLightList = draw_vswr\n elif draw_vswr is True:\n # use the default I like\n vswrVeryLightList = [1.5, 2.0, 3.0, 5.0]\n else:\n vswrVeryLightList = []\n\n # cheap way to make an ok-looking smith chart at radii larger than 1\n if smithR > 1:\n rMax = (1.+smithR)/(1.-smithR)\n rLightList = plb.hstack([ plb.linspace(0,rMax,11) , rLightList ])\n\n if chart_type.startswith('y'):\n y_flip_sign = -1\n else:\n y_flip_sign = 1\n\n # draw impedance and/or admittance\n both_charts = chart_type in ('zy', 'yz')\n\n\n # loops through VeryLight, Light and Heavy lists and draws circles using patches\n # for analysis of this see R.M. Weikle's Microwave II notes (from UVA)\n\n superLightColor = dict(ec='whitesmoke', fc='none')\n veryLightColor = dict(ec='lightgrey', fc='none')\n lightColor = dict(ec='grey', fc='none')\n heavyColor = dict(ec='black', fc='none')\n\n # vswr circles, very light\n for vswr in vswrVeryLightList:\n radius = (vswr-1.0) / (vswr+1.0)\n contour.append( Circle((0, 0), radius, **veryLightColor))\n\n # impedance/admittance circles\n for r in rLightList:\n center = (r/(1.+r)*y_flip_sign,0 )\n radius = 1./(1+r)\n if both_charts:\n contour.insert(0, Circle((-center[0], center[1]), radius, **superLightColor))\n contour.append(Circle(center, radius, **lightColor))\n for x in xLightList:\n center = (1*y_flip_sign,1./x)\n radius = 1./x\n if both_charts:\n contour.insert(0, Circle( (-center[0], center[1]), radius, **superLightColor))\n contour.append(Circle(center, radius, **lightColor))\n\n for r in rHeavyList:\n center = (r/(1.+r)*y_flip_sign,0 )\n radius = 1./(1+r)\n contour.append(Circle(center, radius, **heavyColor))\n for x in xHeavyList:\n center = (1*y_flip_sign,1./x)\n radius = 1./x\n contour.append(Circle(center, radius, **heavyColor))\n\n # clipping circle\n clipc = Circle( [0,0], smithR, ec='k',fc='None',visible=True)\n ax1.add_patch( clipc)\n\n #draw x and y axis\n ax1.axhline(0, color='k', lw=.1, clip_path=clipc)\n ax1.axvline(1*y_flip_sign, color='k', clip_path=clipc)\n ax1.grid(0)\n # Set axis limits by plotting white points so zooming works properly\n ax1.plot(smithR*npy.array([-1.1, 1.1]), smithR*npy.array([-1.1, 1.1]), 'w.', markersize = 0)\n ax1.axis('image') # Combination of 'equal' and 'tight'\n \n \n if not border: \n ax1.yaxis.set_ticks([])\n ax1.xaxis.set_ticks([])\n for loc, spine in ax1.spines.items():\n spine.set_color('none')\n \n \n if draw_labels:\n #Clear axis\n ax1.yaxis.set_ticks([])\n ax1.xaxis.set_ticks([])\n for loc, spine in ax1.spines.items():\n spine.set_color('none')\n\n # Make annotations only if the radius is 1\n if smithR == 1:\n #Make room for annotation\n ax1.plot(npy.array([-1.25, 1.25]), 
npy.array([-1.1, 1.1]), 'w.', markersize = 0)\n ax1.axis('image')\n\n #Annotate real part\n for value in rLightList:\n # Set radius of real part's label; offset slightly left (Z\n # chart, y_flip_sign == 1) or right (Y chart, y_flip_sign == -1)\n # so label doesn't overlap chart's circles\n rho = (value - 1)/(value + 1) - y_flip_sign*0.01\n if y_flip_sign == 1:\n halignstyle = \"right\"\n else:\n halignstyle = \"left\"\n ax1.annotate(str(value*ref_imm), xy=(rho*smithR, 0.01),\n xytext=(rho*smithR, 0.01), ha = halignstyle, va = \"baseline\")\n\n #Annotate imaginary part\n radialScaleFactor = 1.01 # Scale radius of label position by this\n # factor. Making it >1 places the label\n # outside the Smith chart's circle\n for value in xLightList:\n #Transforms from complex to cartesian\n S = (1j*value - 1) / (1j*value + 1)\n S *= smithR * radialScaleFactor\n rhox = S.real\n rhoy = S.imag * y_flip_sign\n \n # Choose alignment anchor point based on label's value\n if ((value == 1.0) or (value == -1.0)):\n halignstyle = \"center\"\n elif (rhox < 0.0):\n halignstyle = \"right\"\n else:\n halignstyle = \"left\"\n \n if (rhoy < 0):\n valignstyle = \"top\"\n else:\n valignstyle = \"bottom\"\n #Annotate value\n ax1.annotate(str(value*ref_imm) + 'j', xy=(rhox, rhoy),\n xytext=(rhox, rhoy), ha = halignstyle, va = valignstyle)\n\n #Annotate 0 and inf\n ax1.annotate('0.0', xy=(-1.02, 0), xytext=(-1.02, 0),\n ha = \"right\", va = \"center\")\n ax1.annotate('$\infty$', xy=(radialScaleFactor, 0), xytext=(radialScaleFactor, 0),\n ha = \"left\", va = \"center\")\n\n # annotate vswr circles\n for vswr in vswrVeryLightList:\n rhoy = (vswr-1.0) / (vswr+1.0)\n\n ax1.annotate(str(vswr), xy=(0, rhoy*smithR),\n xytext=(0, rhoy*smithR), ha=\"center\", va=\"bottom\",\n color='grey', size='smaller')\n\n # loop through contours and draw them on the given axes\n for currentContour in contour:\n cc = ax1.add_patch(currentContour)\n cc.set_clip_path(clipc)\n\ndef plot_rectangular(x, y, x_label=None, y_label=None, title=None,\n show_legend=True, axis='tight', ax=None, *args, **kwargs):\n '''\n plots rectangular data and optionally label axes.\n\n Parameters\n ------------\n x : array-like\n data for the x-axis\n y : array-like\n data for the y-axis\n x_label : string\n x-axis label\n y_label : string\n y-axis label\n title : string\n plot title\n show_legend : Boolean\n controls the drawing of the legend\n ax : :class:`matplotlib.axes.AxesSubplot` object\n axes to draw on\n *args,**kwargs : passed to pylab.plot\n \n '''\n if ax is None:\n ax = plb.gca()\n\n my_plot = ax.plot(x, y, *args, **kwargs)\n\n if x_label is not None:\n ax.set_xlabel(x_label)\n\n if y_label is not None:\n ax.set_ylabel(y_label)\n\n if title is not None:\n ax.set_title(title)\n\n if show_legend:\n # only show legend if they provide a label\n if 'label' in kwargs:\n ax.legend()\n\n if axis is not None:\n ax.autoscale(True, 'x', True)\n ax.autoscale(True, 'y', False)\n \n if plb.isinteractive():\n plb.draw()\n \n return my_plot\n\ndef plot_polar(theta, r, x_label=None, y_label=None, title=None,\n show_legend=True, axis_equal=False, ax=None, *args, **kwargs):\n '''\n plots polar data on a polar plot and optionally label axes.\n\n Parameters\n ------------\n theta : array-like\n angle data to plot\n r : array-like\n radius data to plot\n x_label : string\n x-axis label\n y_label : string\n y-axis label\n title : string\n plot title\n show_legend : Boolean\n controls the drawing of the legend\n ax : :class:`matplotlib.axes.AxesSubplot` object\n axes to draw on\n *args,**kwargs : passed to pylab.plot\n \n See Also\n 
----------\n plot_rectangular : plots rectangular data\n plot_complex_rectangular : plot complex data on complex plane\n plot_polar : plot polar data\n plot_complex_polar : plot complex data on polar plane\n plot_smith : plot complex data on smith chart\n\n '''\n if ax is None:\n ax = plb.gca(polar=True)\n\n ax.plot(theta, r, *args, **kwargs)\n\n if x_label is not None:\n ax.set_xlabel(x_label)\n\n if y_label is not None:\n ax.set_ylabel(y_label)\n\n if title is not None:\n ax.set_title(title)\n\n if show_legend:\n # only show legend if they provide a label\n if 'label' in kwargs:\n ax.legend()\n\n if axis_equal:\n ax.axis('equal')\n \n if plb.isinteractive():\n plb.draw()\n\ndef plot_complex_rectangular(z, x_label='Real', y_label='Imag',\n title='Complex Plane', show_legend=True, axis='equal', ax=None,\n *args, **kwargs):\n '''\n plot complex data on the complex plane\n\n Parameters\n ------------\n z : array-like, of complex data\n data to plot\n x_label : string\n x-axis label\n y_label : string\n y-axis label\n title : string\n plot title\n show_legend : Boolean\n controls the drawing of the legend\n ax : :class:`matplotlib.axes.AxesSubplot` object\n axes to draw on\n *args,**kwargs : passed to pylab.plot\n\n See Also\n ----------\n plot_rectangular : plots rectangular data\n plot_complex_rectangular : plot complex data on complex plane\n plot_polar : plot polar data\n plot_complex_polar : plot complex data on polar plane\n plot_smith : plot complex data on smith chart\n \n '''\n x = npy.real(z)\n y = npy.imag(z)\n plot_rectangular(x=x, y=y, x_label=x_label, y_label=y_label,\n title=title, show_legend=show_legend, axis=axis,\n ax=ax, *args, **kwargs)\n\ndef plot_complex_polar(z, x_label=None, y_label=None,\n title=None, show_legend=True, axis_equal=False, ax=None,\n *args, **kwargs):\n '''\n plot complex data in polar format.\n\n Parameters\n ------------\n z : array-like, of complex data\n data to plot\n x_label : string\n x-axis label\n y_label : string\n y-axis label\n title : string\n plot title\n show_legend : Boolean\n controls the drawing of the legend\n ax : :class:`matplotlib.axes.AxesSubplot` object\n axes to draw on\n *args,**kwargs : passed to pylab.plot\n\n See Also\n ----------\n plot_rectangular : plots rectangular data\n plot_complex_rectangular : plot complex data on complex plane\n plot_polar : plot polar data\n plot_complex_polar : plot complex data on polar plane\n plot_smith : plot complex data on smith chart\n '''\n theta = npy.angle(z)\n r = npy.abs(z)\n plot_polar(theta=theta, r=r, x_label=x_label, y_label=y_label,\n title=title, show_legend=show_legend, axis_equal=axis_equal,\n ax=ax, *args, **kwargs)\n\ndef plot_smith(s, smith_r=1, chart_type='z', x_label='Real',\n y_label='Imaginary', title='Complex Plane', show_legend=True,\n axis='equal', ax=None, force_chart = False, draw_vswr=None, *args, **kwargs):\n '''\n plot complex data on smith chart\n\n Parameters\n ------------\n s : complex array-like\n reflection-coeffient-like data to plot\n smith_r : number\n radius of smith chart\n chart_type : ['z','y']\n Contour type for chart.\n * *'z'* : lines of constant impedance\n * *'y'* : lines of constant admittance\n x_label : string\n x-axis label\n y_label : string\n y-axis label\n title : string\n plot title\n show_legend : Boolean\n controls the drawing of the legend\n axis_equal: Boolean\n sets axis to be equal increments (calls axis('equal'))\n force_chart : Boolean\n forces the re-drawing of smith chart \n ax : :class:`matplotlib.axes.AxesSubplot` object\n 
axes to draw on\n *args,**kwargs : passed to pylab.plot\n\n See Also\n ----------\n plot_rectangular : plots rectangular data\n plot_complex_rectangular : plot complex data on complex plane\n plot_polar : plot polar data\n plot_complex_polar : plot complex data on polar plane\n plot_smith : plot complex data on smith chart\n '''\n \n if ax is None:\n ax = plb.gca()\n\n # test if smith chart is already drawn\n if not force_chart:\n if len(ax.patches) == 0:\n smith(ax=ax, smithR = smith_r, chart_type=chart_type, draw_vswr=draw_vswr)\n\n plot_complex_rectangular(s, x_label=x_label, y_label=y_label,\n title=title, show_legend=show_legend, axis=axis,\n ax=ax, *args, **kwargs)\n\n ax.axis(smith_r*npy.array([-1.1, 1.1, -1.1, 1.1]))\n if plb.isinteractive():\n plb.draw()\n\n\ndef subplot_params(ntwk, param='s', proj='db', size_per_port=4, newfig=True, \n add_titles=True, keep_it_tight=True, subplot_kw={}, *args, **kw):\n '''\n Plot all network parameters individually on subplots\n \n Parameters\n --------------\n \n \n '''\n if newfig:\n f, axs = plb.subplots(ntwk.nports,ntwk.nports,\n figsize =(size_per_port*ntwk.nports,\n size_per_port*ntwk.nports ),\n **subplot_kw)\n else:\n f = plb.gcf() \n axs = npy.array(f.get_axes())\n\n for ports,ax in zip(ntwk.port_tuples, axs.flatten()):\n plot_func = ntwk.__getattribute__('plot_%s_%s'%(param, proj))\n plot_func(m=ports[0], n=ports[1], ax=ax,*args, **kw)\n if add_titles:\n ax.set_title('%s%i%i'%(param.upper(),ports[0]+1, ports[1]+1))\n if keep_it_tight:\n plb.tight_layout()\n return f,axs\n\ndef shade_bands(edges, y_range=None,cmap='prism', **kwargs):\n '''\n Shades frequency bands.\n \n when plotting data over a set of frequency bands it is nice to \n have each band visually separated from the other. The kwarg `alpha`\n is useful.\n \n Parameters \n --------------\n edges : array-like\n x-values separating regions of a given shade\n y_range : tuple \n y-values to shade in \n cmap : str\n see matplotlib.cm or matplotlib.colormaps for acceptable values\n \\*\\* : keyword arguments\n passed to `matplotlib.fill_between`\n \n Examples \n -----------\n >>> rf.shade_bands([325,500,750,1100], alpha=.2)\n '''\n cmap = plb.cm.get_cmap(cmap)\n # only fall back to the current axes' limits when no y_range was given\n if y_range is None:\n y_range = plb.gca().get_ylim()\n axis = plb.axis()\n for k in range(len(edges)-1):\n plb.fill_between(\n [edges[k],edges[k+1]], \n y_range[0], y_range[1], \n color = cmap(1.0*k/len(edges)),\n **kwargs)\n plb.axis(axis)\n\ndef save_all_figs(dir = './', format=None, replace_spaces = True, echo = True):\n '''\n Save all open Figures to disk.\n\n Parameters\n ------------\n dir : string\n path to save figures into\n format : None, or list of strings\n the types of formats to save figures as. The elements of this\n list are passed to :matplotlib:`savefig`. 
This is a list so that\n you can save each figure in multiple formats.\n echo : bool\n True prints filenames as they are saved\n '''\n if dir[-1] != '/':\n dir = dir + '/'\n for fignum in plb.get_fignums():\n fileName = plb.figure(fignum).get_axes()[0].get_title()\n if replace_spaces:\n fileName = fileName.replace(' ','_')\n if fileName == '':\n fileName = 'unnamedPlot'\n if format is None:\n plb.savefig(dir+fileName)\n if echo:\n print((dir+fileName))\n else:\n for fmt in format:\n plb.savefig(dir+fileName+'.'+fmt, format=fmt)\n if echo:\n print((dir+fileName+'.'+fmt))\nsaf = save_all_figs\n\ndef add_markers_to_lines(ax=None,marker_list=['o','D','s','+','x'], markevery=10):\n '''\n adds markers to existing lines on a plot \n \n this is convenient if you already have a plot made, but then \n need to add markers afterwards, so that it can be interpreted in \n black and white. The markevery argument makes the markers less \n frequent than the data, which is generally what you want. \n \n Parameters\n -----------\n ax : matplotlib.Axes\n axis to add markers to, defaults to gca()\n marker_list : list of marker characters\n see matplotlib.plot help for possible marker characters\n markevery : int\n place a marker every `markevery` points.\n \n '''\n if ax is None:\n ax = plb.gca()\n lines = ax.get_lines()\n if len(lines) > len(marker_list):\n marker_list *= 3\n [k[0].set_marker(k[1]) for k in zip(lines, marker_list)]\n [line.set_markevery(markevery) for line in lines]\n\ndef legend_off(ax=None):\n '''\n turn off the legend for a given axes. \n \n if no axes is given then it will use the current axes.\n \n Parameters\n -----------\n ax : matplotlib.Axes object\n axes to operate on \n '''\n if ax is None:\n plb.gca().legend_.set_visible(0)\n else:\n ax.legend_.set_visible(0)\n\ndef scrape_legend(n=None, ax=None):\n '''\n scrapes a legend with redundant labels\n \n Given a legend of m entries of n groups, this will remove all but \n every m/nth entry. This is used when you plot many lines representing\n the same thing, and only want one label entry in the legend for the\n whole ensemble of lines\n \n '''\n \n if ax is None:\n ax = plb.gca()\n \n handles, labels = ax.get_legend_handles_labels()\n \n if n is None:\n n = len(set(labels))\n \n if n > len(handles):\n raise ValueError('number of entries is too large')\n \n k_list = [int(k) for k in npy.linspace(0,len(handles)-1,n)]\n ax.legend([handles[k] for k in k_list], [labels[k] for k in k_list])\n\ndef func_on_all_figs(func, *args, **kwargs):\n '''\n runs a function after making all open figures current. \n \n useful if you need to change the properties of many open figures \n at once, like turning off the grid. 
\n \n Parameters\n ----------\n func : function\n function to call\n \\*args, \\*\\*kwargs : pased to func\n \n Examples\n ----------\n >>> rf.func_on_all_figs(grid,alpha=.3)\n '''\n for fig_n in plb.get_fignums():\n fig = plb.figure(fig_n)\n for ax_n in fig.axes:\n fig.add_axes(ax_n) # trick to make axes current\n func(*args, **kwargs)\n plb.draw()\n\nfoaf = func_on_all_figs\n\ndef plot_vector(a, off=0+0j, *args, **kwargs):\n '''\n plot a 2d vector \n '''\n return quiver(off.real,off.imag,a.real,a.imag,scale_units ='xy', \n angles='xy',scale=1, *args, **kwargs)\n\n\ndef colors():\n return rcParams['axes.color_cycle']\n", "sub_path": "skrf/plotting.py", "file_name": "plotting.py", "file_ext": "py", "file_size_in_byte": 21318, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pylab.gca", "line_number": 82, "usage_type": "call"}, {"api_name": "pylab.logspace", "line_number": 97, "usage_type": "call"}, {"api_name": "pylab.hstack", "line_number": 98, "usage_type": "call"}, {"api_name": "pylab.logspace", "line_number": 98, "usage_type": "call"}, {"api_name": "pylab.array", "line_number": 100, "usage_type": "call"}, {"api_name": "pylab.array", "line_number": 101, "usage_type": "call"}, {"api_name": "pylab.hstack", "line_number": 115, "usage_type": "call"}, {"api_name": "pylab.linspace", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 192, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 277, "usage_type": "call"}, {"api_name": "pylab.isinteractive", "line_number": 299, "usage_type": "call"}, {"api_name": "pylab.draw", "line_number": 300, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 337, "usage_type": "call"}, {"api_name": "pylab.isinteractive", "line_number": 358, "usage_type": "call"}, {"api_name": "pylab.draw", "line_number": 359, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 392, "usage_type": "call"}, {"api_name": "numpy.imag", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.angle", "line_number": 428, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 429, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 476, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 487, "usage_type": "call"}, {"api_name": "pylab.isinteractive", "line_number": 488, "usage_type": "call"}, {"api_name": "pylab.draw", "line_number": 489, "usage_type": "call"}, {"api_name": "pylab.subplots", "line_number": 503, "usage_type": "call"}, {"api_name": "pylab.gcf", "line_number": 508, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 509, "usage_type": "call"}, {"api_name": "pylab.tight_layout", "line_number": 517, "usage_type": "call"}, {"api_name": 
"pylab.cm.get_cmap", "line_number": 543, "usage_type": "call"}, {"api_name": "pylab.cm", "line_number": 543, "usage_type": "attribute"}, {"api_name": "pylab.gca", "line_number": 544, "usage_type": "call"}, {"api_name": "pylab.axis", "line_number": 545, "usage_type": "call"}, {"api_name": "pylab.fill_between", "line_number": 547, "usage_type": "call"}, {"api_name": "pylab.axis", "line_number": 552, "usage_type": "call"}, {"api_name": "pylab.get_fignums", "line_number": 571, "usage_type": "call"}, {"api_name": "pylab.figure", "line_number": 572, "usage_type": "call"}, {"api_name": "pylab.savefig", "line_number": 578, "usage_type": "call"}, {"api_name": "pylab.savefig", "line_number": 583, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 608, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 627, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 643, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 653, "usage_type": "call"}, {"api_name": "pylab.get_fignums", "line_number": 673, "usage_type": "call"}, {"api_name": "pylab.figure", "line_number": 674, "usage_type": "call"}, {"api_name": "pylab.draw", "line_number": 678, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.quiver", "line_number": 686, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 691, "usage_type": "name"}]} +{"seq_id": "521508615", "text": "import cv2\nimport numpy as np\nimport time\nstart_time = time.time()\n\ndef paintArea(img, color):\n\tfor i in xrange(len(img)):\n\t\tfor j in xrange(len(img[0])):\n\t\t\tif(img[i][j][0] != 0 and img[i][j][1] != 0 and img[i][j][2] != 0):\n\t\t\t\timg[i][j] = color;\n\n\nimg = cv2.imread('cores.jpeg')\n#cv2.imshow('img', img)\nresult = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n\n#k = cv2.waitKey(0) \n # blue\nlower = np.array([100,50,50])\nupper = np.array([140,255,255])\nmask = cv2.inRange(result, lower, upper)\nblue = cv2.bitwise_and(img, img, mask= mask)\nm, blue = cv2.threshold(blue, 10, 255, cv2.THRESH_BINARY)\nbluecolor = np.array([255, 0, 0])\nblue = cv2.bitwise_and(blue, bluecolor)\n\n#paintArea(blue, [255, 0, 0])\n# green\nlower = np.array([40,50,50])\nupper = np.array([60,255,255])\nmask = cv2.inRange(result, lower, upper)\ngreen = cv2.bitwise_and(img, img, mask= mask)\nkernel = np.ones((5,5),np.uint8)\n#green = cv2.erode(green, kernel,iterations = 1)\nm, green = cv2.threshold(green, 10, 255, cv2.THRESH_BINARY)\ngreencolor = np.array([0, 255, 0])\ngreen = cv2.bitwise_and(green, greencolor)\n\n#paintArea(green, [0, 255, 0])\n# yellow\nlower = np.array([10,140,50])\nupper = np.array([40,255,255])\nkernel = np.ones((10,10),np.uint8)\n#yellow = cv2.erode(result, kernel,iterations = 1)\nmask = cv2.inRange(result, lower, upper)\nyellow = cv2.bitwise_and(img, img, mask= mask)\nm, yellow = cv2.threshold(yellow, 10, 255, cv2.THRESH_BINARY)\nyellowcolor = np.array([0, 255, 255])\nyellow = cv2.bitwise_and(yellow, yellowcolor)\n\n#paintArea(yellow, [0, 255, 255])\n# orange \nlower = np.array([7,140,50])\nupper = np.array([17,255,255])\nkernel = np.ones((10,10),np.uint8)\n#orange = cv2.erode(result, kernel,iterations = 1)\nkernel = np.ones((5,5),np.uint8)\n#orange = cv2.dilate(orange, kernel,iterations = 1)\nmask = cv2.inRange(result, lower, upper)\norange = cv2.bitwise_and(img, img, mask= mask)\nm, orange = cv2.threshold(orange, 10, 255, cv2.THRESH_BINARY)\norangecolor = np.array([0, 128, 255])\norange = cv2.bitwise_and(orange, orangecolor)\n\n#paintArea(orange, [0, 128, 255])\n# brown\nlower = 
np.array([3,180,50])\nupper = np.array([6,255,255])\nkernel = np.ones((10,10),np.uint8)\n#brown = cv2.erode(result, kernel,iterations = 1)\nkernel = np.ones((5,5),np.uint8)\n#brown = cv2.dilate(brown, kernel,iterations = 2)\nmask = cv2.inRange(result, lower, upper)\nbrown = cv2.bitwise_and(img, img, mask= mask)\nm, brown = cv2.threshold(brown, 10, 255, cv2.THRESH_BINARY)\nbrowncolor = np.array([0, 76, 153])\nbrown = cv2.bitwise_and(brown, browncolor)\n#paintArea(brow, [0, 76, 153])\n# red\nlower = np.array([3,50,150])\nupper = np.array([7,255,255])\nmask = cv2.inRange(result, lower, upper)\nred = cv2.bitwise_and(img, img, mask= mask)\nkernel = np.ones((2,2),np.uint8)\n#red = cv2.erode(red, kernel,iterations = 2)\nkernel = np.ones((6,6),np.uint8)\n#red = cv2.dilate(red, kernel,iterations = 2)\nkernel = np.ones((7,7),np.uint8)\n#red = cv2.erode(red, kernel,iterations = 2)\nm, red = cv2.threshold(red, 10, 255, cv2.THRESH_BINARY)\nredcolor = np.array([0, 0, 255])\nred = cv2.bitwise_and(red, redcolor)\n\n#paintArea(result, [0, 0, 255])\n\nresult = cv2.bitwise_or(brown, red)\nresult = cv2.bitwise_or(green, result)\nresult = cv2.bitwise_or(blue, result)\nresult = cv2.bitwise_or(yellow, result)\nresult = cv2.bitwise_or(orange, result)\n\n#result = np.hstack((img, result))\n#cv2.imshow('img', result)\n#cv2.imwrite('simple.jpeg', result)\n#k = cv2.waitKey(0) \nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n", "sub_path": "ex1/simple.py", "file_name": "simple.py", "file_ext": "py", "file_size_in_byte": 3444, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "time.time", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 44, "usage_type": "attribute"}, {"api_name": 
"cv2.inRange", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 72, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 76, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 89, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 93, "usage_type": "call"}, {"api_name": "cv2.bitwise_or", "line_number": 97, "usage_type": "call"}, {"api_name": "cv2.bitwise_or", "line_number": 98, "usage_type": "call"}, {"api_name": "cv2.bitwise_or", "line_number": 99, "usage_type": "call"}, {"api_name": "cv2.bitwise_or", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.bitwise_or", "line_number": 101, "usage_type": "call"}, 
{"api_name": "time.time", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "175679605", "text": "import sys\n\nimport numpy\nfrom setuptools import find_packages, setup\n\n\"\"\"\nSome cheat-codes for setting up a PyPi repo:\n \n To re-setup: \n \n python setup.py bdist_wheel\n\n pip install -r requirements.txt --process-dependency-links\n\n To test on test pypi:\n \n twine upload --repository testpypi dist/*\n\"\"\"\n\nPACKAGE_NAME = \"pyautomagic\"\nVERSION = \"0.1\"\nDESCRIPTION = \"A Python3 package for eeg (pre)processing from Automagic.\"\nURL = 'https://github.com/NeuroDataDesign/pyautomagic'\nMINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.6\nREQUIRED_PACKAGES = [\n \"numpy>=1.14.5\",\n \"scipy>=1.1.0\",\n \"scikit-learn>=0.19.2\",\n \"pandas>=0.23.4\",\n \"mne>=0.18.2\",\n \"mne-bids>=0.3\",\n]\nCLASSIFICATION_OF_PACKAGE = [\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 3 - Alpha',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: Scientific/Engineering',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation',\n\n 'Natural Language :: English',\n]\n\ndef check_python_version():\n \"\"\"Exit when the Python version is too low.\"\"\"\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\n\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n packages=find_packages(),\n long_description=open('README.md').read(),\n url=URL,\n author='NDD19',\n license='MIT',\n # include_dirs=[numpy.get_include()],\n dependency_links=[\n 'git+https://github.com/NeuroDataDesign/pyautomagic#egg=pyautomagic',\n ],\n install_requires=REQUIRED_PACKAGES,\n include_package_data=True,\n classifiers=CLASSIFICATION_OF_PACKAGE,\n zip_ok=False,\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.version_info", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 58, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 61, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "85351455", "text": "import setuptools\nfrom pipenv.project import Project\nfrom pipenv.utils import convert_deps_to_pip\n\npfile = Project(chdir=False).parsed_pipfile\nrequirements = convert_deps_to_pip(pfile['packages'], r=False)\ntest_requirements = convert_deps_to_pip(pfile['dev-packages'], r=False)\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"gin\",\n version=\"0.1\",\n author=\"Bilal Elmoussaoui\",\n author_email=\"bil.elmoussaoui@gmail.com\",\n description=\"Not Wine\",\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n license='MIT',\n url=\"https://github.com/bilelmoussaoui/gin\",\n entry_points={\n 'console_scripts': ['gin=gin.cli:main'],\n },\n packages=[\n 'gin',\n 'gin.dependencies',\n 
'gin.export',\n 'gin.sources'\n ],\n install_requires=requirements,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.7',\n \"License :: OSI Approved :: MIT License\",\n 'Topic :: Utilities',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n tests_require=test_requirements,\n test_suite='tests'\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1185, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pipenv.project.Project", "line_number": 5, "usage_type": "call"}, {"api_name": "pipenv.utils.convert_deps_to_pip", "line_number": 6, "usage_type": "call"}, {"api_name": "pipenv.utils.convert_deps_to_pip", "line_number": 7, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "382846942", "text": "# Main Method - OCR\nimport numpy as np\nimport pytesseract\nfrom pathlib import Path\nfrom preprocess_steps import TextPreProcessor\nfrom PIL import Image\n\n\nclass MainOCRProcessor(TextPreProcessor):\n def apply_preprocessor(self, pdf_images):\n if pdf_images is not None:\n for page in pdf_images:\n # convert image to array\n doc_array = np.array(page, dtype=np.uint8)\n grayed_image = self.img_to_gray(doc_array)\n edges_array = self.blur_img(grayed_image)\n init_contours = self.bw_binary_img(edges_array)\n max_contours = self.find_max_contours(init_contours)\n skew_angle = self.draw_box(max_contours)\n rotated_image = self.check_rotated_img(grayed_image, skew_angle)\n sharpened_image = self.sharpen_img(rotated_image)\n return sharpened_image\n else:\n raise ValueError(\"Invalid data provided. Must be list of image(s)\")\n\n def ocr_extractor(self, sharpened_image_list):\n docs_text = []\n rotated_images_array = []\n text = str(pytesseract.image_to_string((sharpened_image_list))).replace(\n \"\\n\", \" \"\n )\n docs_text.append(text)\n rotated_images_array.append(Image.fromarray(sharpened_image_list))\n\n return docs_text, rotated_images_array\n\n\nif __name__ == \"__main__\":\n mop = MainOCRProcessor() \n pathlist = Path(\"testFiles\").glob(\"*.pdf\")\n filepath = [str(x) for x in pathlist]\n for doc in filepath:\n img = mop.pdf2image_converter_from_path(doc)\n post_proc_img = mop.apply_preprocessor(img)\n doc_txt, rotated_img = mop.ocr_extractor(post_proc_img)\n print(doc_txt)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1737, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "preprocess_steps.TextPreProcessor", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pytesseract.image_to_string", "line_number": 29, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 33, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "365965487", "text": "from sqlalchemy import Column, Integer, String, create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nBase = declarative_base()\nengine = create_engine('sqlite:///restaurant.db')\nBase.metadata.bind = engine\n\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\nclass 
Restaurant(Base):\n __tablename__ = 'restaurant'\n id = Column(Integer, primary_key=True)\n name = Column(String(100), nullable=False)\n\n def __repr__(self):\n return '<Restaurant %r>' % self.name\n\n @staticmethod\n def query(name):\n assert isinstance(name, str)\n return session.query(Restaurant).filter(Restaurant.name == name).first()\n\n @staticmethod\n def query_by_id(id):\n try:\n id = int(id)\n except ValueError:\n raise ValueError(\"Cannot convert %s to Integer\" % id)\n return session.query(Restaurant).filter(Restaurant.id == id).first()\n\n @staticmethod\n def query_all():\n return session.query(Restaurant).all()\n\n @staticmethod\n def rename(id, name):\n r = Restaurant.query_by_id(id)\n r.name = name\n session.add(r)\n session.commit()\n \n @staticmethod\n def insert(name):\n if Restaurant.query(name) is not None:\n raise ValueError(\"Restaurant %r already exists\" % name)\n else:\n res = Restaurant(name=name)\n session.add(res)\n session.commit()\n\n @staticmethod\n def delete(id):\n r = Restaurant.query_by_id(id)\n if r is None:\n return False\n else:\n session.delete(r)\n session.commit()\n return True\n\n\nBase.metadata.create_all(engine)\n", "sub_path": "Lession-2/first-web-server/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1726, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 5, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 6, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 14, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "159898263", "text": "from itertools import combinations\n\nfrom config import RSA_bits\nfrom utils.util import mod_div, prod\n\n\ndef lagrange(x, y, g):\n assert len(x) == len(y) and len(x) > 0, \"Lengths of x and y must be equal and non-zero.\"\n x_len = len(x)\n f = [0] * x_len\n for i in range(x_len):\n partial = []\n combo_list = list(x)\n combo_list.pop(i)\n for j in range(x_len):\n c = 0\n for k in combinations(combo_list, j):\n c += prod(map(lambda q: -q, k))\n partial.append(c)\n d = 1\n for j in range(x_len):\n if j != i:\n d *= x[i] - x[j]\n\n partial = map(lambda q: mod_div(q * y[i], d, g), partial)\n f = [(m + n) % g for m, n in zip(f, partial)]  # also needs % g\n\n for i in range(x_len):\n assert compute_poly(f, x[i], g) == y[i], i\n return f\n\n\ndef bytes_to_int(m):\n return int.from_bytes(m, byteorder=\"big\")\n\n\ndef int_to_bytes(i):\n return i.to_bytes(RSA_bits // 8, byteorder=\"big\")\n\n\ndef strip_padding(b, secret_length):\n return b[(RSA_bits // 8 - secret_length):]\n # return b\n\n\ndef compute_poly(f, x, m):\n y = 0\n for i in range(len(f)):\n y += f[i] * pow(x, len(f) - 1 - i, m)\n return y % m\n", "sub_path": "utils/ot.py", "file_name": "ot.py", "file_ext": "py", "file_size_in_byte": 1244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "itertools.combinations", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.util.prod", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.util.mod_div", "line_number": 25, "usage_type": 
"call"}, {"api_name": "config.RSA_bits", "line_number": 38, "usage_type": "name"}, {"api_name": "config.RSA_bits", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "253752439", "text": "import os\n\nfrom panda3d.core import Filename, GeoMipTerrain, LQuaternionf, loadPrcFileData, Texture, TextureStage\n\nfrom . import FruitConfigParser\n\nclass Scene(object):\n def __path(self, filename):\n return os.path.join(self.__directory, filename)\n\n def __texture_path(self, texture):\n return Filename.fromOsSpecific(self.__path(self.__scene.get(\"textures\", texture)))\n\n def __create_terrain(self):\n terrain = GeoMipTerrain(\"Terrain\")\n terrain.setHeightfield(self.__texture_path(self.__scene.get(\"scene\", \"heightmap\")))\n terrain.getRoot().reparentTo(self.render)\n terrain.generate()\n terrain.getRoot().setSx(1000.0 / 512)\n terrain.getRoot().setSy(1000.0 / 512)\n terrain.getRoot().setSz(74)\n terrain.getRoot().setPos(-500, -500, 0)\n\n black = self.loader.loadTexture(self.__texture_path(self.__scene.get(\"terrain\", \"black\")))\n black.setMinfilter(Texture.FTLinearMipmapNearest)\n ts = TextureStage(\"stage-first\")\n ts.setSort(0)\n ts.setMode(TextureStage.MReplace)\n ts.setSavedResult(True)\n terrain.getRoot().setTexture(ts, black)\n terrain.getRoot().setTexScale(ts, 250, 250)\n\n white = self.loader.loadTexture(self.__texture_path(self.__scene.get(\"terrain\", \"white\")))\n white.setMinfilter(Texture.FTLinearMipmapNearest)\n ts = TextureStage(\"stage-second\")\n ts.setSort(1)\n ts.setMode(TextureStage.MReplace)\n terrain.getRoot().setTexture(ts, white)\n terrain.getRoot().setTexScale(ts, 250, 250)\n\n stencil = self.loader.loadTexture(self.__texture_path(self.__scene.get(\"scene\", \"stencil\")))\n ts = TextureStage(\"stage-stencil\")\n ts.setSort(2)\n ts.setCombineRgb(TextureStage.CMInterpolate,\n TextureStage.CSPrevious, TextureStage.COSrcColor,\n TextureStage.CSLastSavedResult, TextureStage.COSrcColor,\n TextureStage.CSTexture, TextureStage.COSrcColor)\n\n terrain.getRoot().setTexture(ts, stencil)\n\n ts = TextureStage(\"stage-vertexcolour\")\n ts.setSort(3)\n ts.setCombineRgb(TextureStage.CMModulate, TextureStage.CSPrevious, TextureStage.COSrcColor,\n TextureStage.CSPrimaryColor, TextureStage.COSrcColor)\n\n terrain.getRoot().setTexture(ts, \"final\")\n\n def __create_skybox(self):\n egg = self.loader.loadModel(\"media/skybox.egg\")\n egg.reparentTo(self.render)\n sky = self.loader.loadTexture(self.__texture_path(self.__scene.get(\"scene\", \"skybox\")))\n egg.setTexture(sky)\n\n def create_scene(self, directory):\n self.__directory = directory\n loadPrcFileData(\"\", \"model-path %s\" % directory)\n self.__scene = FruitConfigParser()\n self.__scene.read(self.__path(\"scene.cfg\"))\n for thing in self.__scene.get(\"scene\", \"things\").split(\" \"):\n egg = self.__scene.get(thing, \"egg\")\n egg = self.loader.loadModel(egg)\n egg.reparentTo(self.render)\n\n egg.setPos(*self.__scene.getfloats(thing, \"location\"))\n egg.setQuat(LQuaternionf(*self.__scene.getfloats(thing, \"rotation\")))\n egg.setScale(*self.__scene.getfloats(thing, \"scale\"))\n\n if self.__scene.has_option(\"scene\", \"stencil\"):\n self.__create_terrain()\n\n if self.__scene.has_option(\"scene\", \"skybox\"):\n self.__create_skybox()\n", "sub_path": "fruit/scene.py", "file_name": "scene.py", "file_ext": "py", "file_size_in_byte": 3451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "panda3d.core.Filename.fromOsSpecific", "line_number": 12, "usage_type": "call"}, {"api_name": "panda3d.core.Filename", "line_number": 12, "usage_type": "name"}, {"api_name": "panda3d.core.GeoMipTerrain", "line_number": 15, "usage_type": "call"}, {"api_name": "panda3d.core.Texture.FTLinearMipmapNearest", "line_number": 25, "usage_type": "attribute"}, {"api_name": "panda3d.core.Texture", "line_number": 25, "usage_type": "name"}, {"api_name": "panda3d.core.TextureStage", "line_number": 26, "usage_type": "call"}, {"api_name": "panda3d.core.TextureStage.MReplace", "line_number": 28, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage", "line_number": 28, "usage_type": "name"}, {"api_name": "panda3d.core.Texture.FTLinearMipmapNearest", "line_number": 34, "usage_type": "attribute"}, {"api_name": "panda3d.core.Texture", "line_number": 34, "usage_type": "name"}, {"api_name": "panda3d.core.TextureStage", "line_number": 35, "usage_type": "call"}, {"api_name": "panda3d.core.TextureStage.MReplace", "line_number": 37, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage", "line_number": 37, "usage_type": "name"}, {"api_name": "panda3d.core.TextureStage", "line_number": 42, "usage_type": "call"}, {"api_name": "panda3d.core.TextureStage.CMInterpolate", "line_number": 44, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage", "line_number": 44, "usage_type": "name"}, {"api_name": "panda3d.core.TextureStage.CSPrevious", "line_number": 45, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage", "line_number": 45, "usage_type": "name"}, {"api_name": "panda3d.core.TextureStage.COSrcColor", "line_number": 45, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage.CSLastSavedResult", "line_number": 46, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage", "line_number": 46, "usage_type": "name"}, {"api_name": "panda3d.core.TextureStage.COSrcColor", "line_number": 46, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage.CSTexture", "line_number": 47, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage", "line_number": 47, "usage_type": "name"}, {"api_name": "panda3d.core.TextureStage.COSrcColor", "line_number": 47, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage", "line_number": 51, "usage_type": "call"}, {"api_name": "panda3d.core.TextureStage.CMModulate", "line_number": 53, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage", "line_number": 53, "usage_type": "name"}, {"api_name": "panda3d.core.TextureStage.CSPrevious", "line_number": 53, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage.COSrcColor", "line_number": 53, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage.CSPrimaryColor", "line_number": 54, "usage_type": "attribute"}, {"api_name": "panda3d.core.TextureStage", "line_number": 54, "usage_type": "name"}, {"api_name": "panda3d.core.TextureStage.COSrcColor", "line_number": 54, "usage_type": "attribute"}, {"api_name": "panda3d.core.loadPrcFileData", "line_number": 66, "usage_type": "call"}, {"api_name": "panda3d.core.LQuaternionf", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "344069584", "text": "import logging\nimport sys\nfrom rdflib import Graph, URIRef, RDFS\nfrom AlignmentFormat import serialize_mapping_to_tmp_file\nfrom collections import defaultdict\n\n\ndef match_rdflib(source_graph, target_graph, 
input_alignment):\n # a very simple label matcher:\n alignment = []\n\n label_to_uri = defaultdict(list)\n for s, p, o in source_graph.triples((None, RDFS.label, None)):\n if isinstance(s, URIRef):\n label_to_uri[str(o)].append(str(s))\n\n for s, p, o in target_graph.triples((None, RDFS.label, None)):\n if isinstance(s, URIRef) and str(o) in label_to_uri:\n for one_uri in label_to_uri[str(o)]:\n alignment.append((one_uri, str(s), \"=\", 1.0))\n return alignment\n # return [('http://one.de', 'http://two.de', '=', 1.0)]\n\n\ndef get_file_from_url(location):\n from urllib.parse import unquote, urlparse\n from urllib.request import url2pathname, urlopen\n\n if location.startswith(\"file:\"):\n return open(url2pathname(unquote(urlparse(location).path)))\n else:\n return urlopen(location)\n\n\ndef match(source_url, target_url, input_alignment_url):\n logging.info(\"Python matcher info: Match \" + source_url + \" to \" + target_url)\n\n # in case you want the file object use\n # source_file = get_file_from_url(source_url)\n # target_file = get_file_from_url(target_url)\n\n source_graph = Graph()\n source_graph.parse(source_url)\n logging.info(\"Read source with %s triples.\", len(source_graph))\n\n target_graph = Graph()\n target_graph.parse(target_url)\n logging.info(\"Read target with %s triples.\", len(target_graph))\n\n input_alignment = None\n # if input_alignment_url is not None:\n\n resulting_alignment = match_rdflib(source_graph, target_graph, input_alignment)\n\n # in case you have the results in a pandas dataframe, make sure you have the columns\n # source (uri), target (uri), relation (usually the string '='), confidence (floating point number)\n # in case relation or confidence is missing use: df[\"relation\"] = '=' and df[\"confidence\"] = 1.0\n # then select the columns in the right order (like df[['source', 'target', 'relation', 'confidence']])\n # because serialize_mapping_to_tmp_file expects an iterbale of source, target, relation, confidence\n # and then call .itertuples(index=False)\n # example: alignment_file_url = serialize_mapping_to_tmp_file(df[['source', 'target', 'relation', 'confidence']].itertuples(index=False))\n\n alignment_file_url = serialize_mapping_to_tmp_file(resulting_alignment)\n return alignment_file_url\n\n\ndef main(argv):\n if len(argv) == 2:\n print(match(argv[0], argv[1], None))\n elif len(argv) >= 3:\n if len(argv) > 3:\n logging.error(\"Too many parameters but we will ignore them.\")\n print(match(argv[0], argv[1], argv[2]))\n else:\n logging.error(\n \"Too few parameters. 
Need at least two (source and target URL of ontologies\"\n )\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n format=\"%(asctime)s %(levelname)s:%(message)s\", level=logging.INFO\n )\n main(sys.argv[1:])\n", "sub_path": "examples/externalPythonMatcherWeb/oaei-resources/pythonMatcher.py", "file_name": "pythonMatcher.py", "file_ext": "py", "file_size_in_byte": 3123, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "collections.defaultdict", "line_number": 12, "usage_type": "call"}, {"api_name": "rdflib.RDFS.label", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rdflib.RDFS", "line_number": 13, "usage_type": "name"}, {"api_name": "rdflib.URIRef", "line_number": 14, "usage_type": "argument"}, {"api_name": "rdflib.RDFS.label", "line_number": 17, "usage_type": "attribute"}, {"api_name": "rdflib.RDFS", "line_number": 17, "usage_type": "name"}, {"api_name": "rdflib.URIRef", "line_number": 18, "usage_type": "argument"}, {"api_name": "urllib.request.url2pathname", "line_number": 30, "usage_type": "call"}, {"api_name": "urllib.parse.unquote", "line_number": 30, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 30, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 36, "usage_type": "call"}, {"api_name": "rdflib.Graph", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 44, "usage_type": "call"}, {"api_name": "rdflib.Graph", "line_number": 46, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 48, "usage_type": "call"}, {"api_name": "AlignmentFormat.serialize_mapping_to_tmp_file", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 72, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 81, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 82, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 84, "usage_type": "attribute"}]} +{"seq_id": "476599057", "text": "from splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport pymongo\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pandas as pd\nimport time \n\ndef init_browser(): \n executable_path = {'executable_path': ChromeDriverManager().install()}\n browser = Browser('chrome', **executable_path, headless=False)\n return browser\n\ndef scrape():\n\n browser = init_browser()\n\n #scrape Nasa site for first headline\n #---------------------------------------#\n #URL of page to be scraped\n url = 'https://mars.nasa.gov/news/'\n browser.visit(url)\n time.sleep(1)\n\n # HTML object\n html = browser.html\n # Parse HTML with Beautiful Soup\n soup = bs(html, 'html.parser')\n\n # Retrieve all elements that contain header information\n headers = soup.find_all('div', class_='content_title')\n paragraphs = soup.find_all('div', class_='article_teaser_body')\n\n news_title = headers[1].text\n news_p = paragraphs[0].text\n \n #quit browser\n browser.quit() \n\n\n #--------MARS Image Scrape--------------#\n\n #URL of page to be scraped\n browser = init_browser()\n url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'\n browser.visit(url)\n time.sleep(1)\n\n browser.links.find_by_partial_text('FULL IMAGE').click()\n\n html = browser.html\n soup = bs(html,'html.parser')\n\n\n 
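# The FULL IMAGE click above swaps a fancybox overlay into the page; the\n    # full-resolution picture sits in its <img class='fancybox-image'> element,\n    # so the scrape below pulls that element's src attribute.\n    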
results = soup.find('img', class_='fancybox-image')['src']\n\n\n image_url = f'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/{results}'\n\n #quit browser\n browser.quit() \n\n\n #--------MARS Facts--------------#\n\n browser = init_browser()\n\n #facts url\n facts_url = 'http://space-facts.com/mars/'\n\n # Use pandas' `read_html` to parse the url\n mars_facts = pd.read_html(facts_url)\n\n # Find the mars facts DataFrame in the list of DataFrames and assign it to `mars_df`\n mars_df = mars_facts[0]\n\n # Assign the columns `['Description', 'Values']`\n mars_df.columns = ['Description','Values']\n\n\n # Save html code to folder Assets\n marshtml = mars_df.to_html(index=False)\n\n text_file = open(\"marsfacts.html\", \"w\")\n text_file.write(marshtml)\n text_file.close()\n\n #--------MARS Hemispheres--------------#\n # Visit hemispheres website through splinter module \n hem_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(hem_url)\n\n #html \n html_hem = browser.html\n\n #parse with Beautiful Soup\n soup = bs(html_hem, 'html.parser')\n\n #get items that have mars hemisphere information\n items = soup.find_all('div', class_='item')\n\n #empty list for urls\n listings = []\n\n #base url\n hem_base_url = 'https://astrogeology.usgs.gov'\n\n #for loop to collect urls\n\n for i in items:\n #store title\n title = i.find('h3').text\n \n #store link that opens full image\n partial_img_url = i.find('a', class_='itemLink product-item')['href']\n \n #visit link that contains full image\n browser.visit(hem_base_url + partial_img_url)\n \n #HTML Object of full screen\n partial_img_html = browser.html\n \n #Parse with BS\n soup_hem = bs(partial_img_html, 'html.parser')\n \n #Full image source\n full_img = hem_base_url + soup_hem.find('img', class_='wide-image')['src']\n \n #Append into a list of dictionaries\n listings.append({\"title\": title, \"full_img\": full_img})\n \n \n \n browser.quit()\n\n mars_dict = {}\n mars_dict['news_title'] = news_title\n mars_dict['news_p'] = news_p\n mars_dict['image_url'] = image_url\n mars_dict['mars_table'] = marshtml\n mars_dict['hemisphere_list'] = listings\n\n return mars_dict\n", "sub_path": "scrape_mars.py", "file_name": "scrape_mars.py", "file_ext": "py", "file_size_in_byte": 3766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 10, "usage_type": "call"}, {"api_name": "splinter.Browser", "line_number": 11, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 72, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 97, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "164960587", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec  6 09:04:25 2018\r\n\r\n@author: chenq\r\n\"\"\"\r\nimport time\r\nimport pygame\r\nfrom pygame.locals import *\r\n\r\nwindowSize = (1280, 720)\r\n \r\nclass Player:\r\n    x = 500\r\n    y = 300\r\n    Ydirections = {'up': -1, 'down': 1, 'none':0}\r\n    Xdirections = {'left': -1, 'right':1, 'none':0}\r\n    XrunningDirection = 'right'\r\n    
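# Heading state: only one axis drives movement at a time; the idle axis\r\n    # direction is set to 'none' so its contribution in running() is zero.\r\n    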
YrunningDirection = 'none'\r\n speed = 1\r\n rec = pygame.Surface((50,50))\r\n rec.fill((255,255,255))\r\n rect = rec.get_rect()\r\n \r\n def running(self):\r\n XnextStep = self.x + self.Xdirections[self.XrunningDirection] * self.speed\r\n YnextStep = self.y + self.Ydirections[self.YrunningDirection] * self.speed\r\n if (XnextStep >= 0 and XnextStep <= windowSize[0] - 50 and YnextStep >= 0 and YnextStep <= windowSize[1] -50):\r\n self.x = XnextStep\r\n self.y = YnextStep\r\n else:\r\n pass\r\n '''\r\n if (self.x >= 0 and self. x <= windowSize[0] - 50 and self.y >= 0 and self.y <= windowSize[1] - 50):\r\n if(self.runningDirection == 'right' or self.runningDirection == 'left'):\r\n self.x = self.x + self.directions[self.runningDirection] * self.speed\r\n else:\r\n self.y = self.y + self.directions[self.runningDirection] * self.speed\r\n else:\r\n if (self.x < 0 or self.x > windowSize[0] - 50):\r\n self.x = self.x - self.directions[self.runningDirection] * self.speed\r\n if (self.y < 0 or self.y > windowSize[1] - 50):\r\n self.y - self.directions[self.runningDirection] * self.speed\r\n '''\r\n \r\n def changeDirection(self,code):\r\n if (code == 1):\r\n self.YrunningDirection = 'up'\r\n self.XrunningDirection = 'none'\r\n elif (code == 2):\r\n self.YrunningDirection = 'down'\r\n self.XrunningDirection = 'none'\r\n elif (code == 3):\r\n self.XrunningDirection = 'left'\r\n self.YrunningDirection = 'none'\r\n elif (code == 4):\r\n self.XrunningDirection = 'right'\r\n self.YrunningDirection = 'none'\r\n \r\n def moveRight(self):\r\n self.x = self.x + self.speed\r\n \r\n def moveLeft(self):\r\n self.x = self.x - self.speed\r\n \r\n def moveUp(self):\r\n self.y = self.y - self.speed\r\n \r\n def moveDown(self):\r\n self.y = self.y + self.speed\r\n \r\nclass Snake:\r\n x = 200\r\n y = 300\r\n Ydirections = {'up': -1, 'down': 1, 'none':0}\r\n Xdirections = {'left': -1, 'right':1, 'none':0}\r\n XrunningDirection = 'right'\r\n YrunningDirection = 'none'\r\n speed = 2\r\n rec = pygame.Surface((50,50))\r\n rec.fill((255,0,0))\r\n rect = rec.get_rect()\r\n t = time.time()\r\n \r\n def running(self):\r\n XnextStep = self.x + self.Xdirections[self.XrunningDirection] * self.speed\r\n YnextStep = self.y + self.Ydirections[self.YrunningDirection] * self.speed\r\n if (XnextStep >= 0 and XnextStep <= windowSize[0] - 50 and YnextStep >= 0 and YnextStep <= windowSize[1] -50):\r\n self.x = XnextStep\r\n self.y = YnextStep\r\n else:\r\n pass\r\n if (self.speed >= 0.5):\r\n self.speed -= (time.time()-self.t)/10000;\r\n \r\n def changeDirection(self,code):\r\n if (code == 1):\r\n self.YrunningDirection = 'up'\r\n self.XrunningDirection = 'none'\r\n elif (code == 2):\r\n self.YrunningDirection = 'down'\r\n self.XrunningDirection = 'none'\r\n elif (code == 3):\r\n self.XrunningDirection = 'left'\r\n self.YrunningDirection = 'none'\r\n elif (code == 4):\r\n self.XrunningDirection = 'right'\r\n self.YrunningDirection = 'none'\r\n \r\n def moveRight(self):\r\n self.x = self.x + self.speed\r\n \r\n def moveLeft(self):\r\n self.x = self.x - self.speed\r\n \r\n def moveUp(self):\r\n self.y = self.y - self.speed\r\n \r\n def moveDown(self):\r\n self.y = self.y + self.speed\r\n\r\nclass App:\r\n \r\n windowWidth = 800\r\n windowHeight = 600\r\n player = 0\r\n \r\n def __init__(self):\r\n self._running = True\r\n self._display_surf = None\r\n self.player_surf = None\r\n self.snake_surf = None\r\n self.player = Player() \r\n self.snake = Snake()\r\n \r\n def on_init(self):\r\n pygame.init()\r\n self._display_surf = 
pygame.display.set_mode(windowSize, pygame.HWSURFACE)\r\n \r\n pygame.display.set_caption('Snake game test')\r\n self._running = True\r\n # self._image_surf = pygame.image.load(\"pygame.png\").convert()\r\n self.player_surf = self.player.rec\r\n self.snake_surf = self.snake.rec\r\n \r\n def on_event(self, event):\r\n if event.type == QUIT:\r\n self._running = False\r\n \r\n def on_loop(self):\r\n pass\r\n \r\n def on_render(self):\r\n self._display_surf.fill((0,0,0))\r\n self._display_surf.blit(self.player_surf,(self.player.x,self.player.y))\r\n self._display_surf.blit(self.snake_surf,(self.snake.x,self.snake.y))\r\n pygame.display.flip()\r\n \r\n def on_cleanup(self):\r\n pygame.quit()\r\n \r\n def on_execute(self):\r\n if self.on_init() == False:\r\n self._running = False\r\n \r\n while( self._running ):\r\n pygame.event.pump()\r\n keys = pygame.key.get_pressed() \r\n self.player.running()\r\n self.snake.running()\r\n \r\n if (keys[K_RIGHT]):\r\n #self.player.moveRight()\r\n self.player.changeDirection(4)\r\n \r\n if (keys[K_LEFT]):\r\n # self.player.moveLeft()\r\n self.player.changeDirection(3)\r\n \r\n if (keys[K_UP]):\r\n # self.player.moveUp()\r\n self.player.changeDirection(1)\r\n \r\n if (keys[K_DOWN]):\r\n # self.player.moveDown()\r\n self.player.changeDirection(2)\r\n \r\n if (keys[K_d]):\r\n self.snake.changeDirection(4)\r\n \r\n if (keys[K_a]):\r\n # self.player.moveLeft()\r\n self.snake.changeDirection(3)\r\n \r\n if (keys[K_w]):\r\n # self.player.moveUp()\r\n self.snake.changeDirection(1)\r\n \r\n if (keys[K_s]):\r\n # self.player.moveDown()\r\n self.snake.changeDirection(2)\r\n \r\n if (keys[K_ESCAPE]):\r\n self._running = False\r\n \r\n self.on_loop()\r\n self.on_render()\r\n self.on_cleanup()\r\n \r\nif __name__ == \"__main__\" :\r\n theApp = App()\r\n theApp.on_execute()\r\n", "sub_path": "gameTrialOne.py", "file_name": "gameTrialOne.py", "file_ext": "py", "file_size_in_byte": 6638, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pygame.Surface", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 80, "usage_type": "call"}, {"api_name": "time.time", "line_number": 83, "usage_type": "call"}, {"api_name": "time.time", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 137, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 138, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.HWSURFACE", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 140, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 157, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 157, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 160, "usage_type": "call"}, {"api_name": "pygame.event.pump", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 168, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 168, "usage_type": "attribute"}]} +{"seq_id": "476191094", "text": "# Download the helper library from https://www.twilio.com/docs/python/install\nfrom twilio.rest import Client\n\n# Your Account Sid and Auth Token from twilio.com/console\n# 
DANGER! This is insecure. See http://twil.io/secure\n\ndef send_message(text, number):\n\n client = Client(account_sid, auth_token)\n\n message = client.messages.create(body=text, from_=\"+12029534376\", to=number)\n\n print(message.sid)\n", "sub_path": "inventory_app/twilio_sms.py", "file_name": "twilio_sms.py", "file_ext": "py", "file_size_in_byte": 407, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "twilio.rest.Client", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "177947708", "text": "from django.core.management.base import BaseCommand\nfrom events.models import Event\nfrom datetime import date, timedelta\n\n\nclass Command(BaseCommand):\n help = 'Deactivates clients from yesterday\\'s events'\n\n def handle(self, *args, **options):\n # 1 make queryset of events that were yesterday\n # 2 get client\n # 3 check if has any *future* events\n # 4 if no future events deactivate client\n # this way we are only ever running this script on a handful of\n # results at any given time as opposed to iterating through\n # all past events, or all clients\n yesterday = date.today() - timedelta(days=1)\n events = Event.objects.filter(date=yesterday)\n for event in events:\n client = event.client\n has_future_event = client.event_set.future_event().exists()\n if has_future_event is False:\n client.is_active = False\n client.save()\n # commented out the following console output since this will be run as a cron job\n # self.stdout.write(self.style.SUCCESS('%s has no future events. Client Deactivated.' % str(client)))\n # else:\n # self.stdout.write(self.style.SUCCESS('%s has Future Event - No action taken.' % str(client)))\n\n\n\n", "sub_path": "users/management/commands/deactivate_users_short.py", "file_name": "deactivate_users_short.py", "file_ext": "py", "file_size_in_byte": 1312, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 6, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 17, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 17, "usage_type": "call"}, {"api_name": "events.models", "line_number": 18, "usage_type": "name"}, {"api_name": "events.models.Event.objects.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "events.models.Event.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "events.models.Event", "line_number": 18, "usage_type": "name"}, {"api_name": "events.models", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "439503383", "text": "import os\nimport sys\nfrom traceback import print_exc\n\nimport pytest\n\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\n\nfrom qtpy.QtWebEngineWidgets import WEBENGINE\n\nfrom matplotlib.testing.compare import compare_images\n\nM42 = SkyCoord.from_name('M42')\n\nDATA = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))\n\n\ndef check_silent_output(capsys):\n out, err = capsys.readouterr()\n assert out.strip() == \"\"\n assert err.strip() == \"\"\n\n\nclass TestWWTWidget:\n\n def test_settings(self, capsys, wwt_qt_client):\n wwt_qt_client.constellation_figures = True\n wwt_qt_client.constellation_figures = False\n wwt_qt_client.wait(1)\n check_silent_output(capsys)\n\n def test_methods(self, capsys, wwt_qt_client):\n 
wwt_qt_client.center_on_coordinates(M42, fov=10 * u.deg)\n wwt_qt_client.wait(1)\n check_silent_output(capsys)\n\n def test_coordinates(self, capsys, wwt_qt_client):\n wwt_qt_client.center_on_coordinates(M42, fov=10 * u.deg)\n assert M42.separation(wwt_qt_client.get_center()).arcsec < 1.e-6\n wwt_qt_client.wait(1)\n check_silent_output(capsys)\n\n def test_annotations(self, capsys, wwt_qt_client):\n circle = wwt_qt_client.add_circle()\n circle.opacity = 0.8\n circle.set_center(M42)\n wwt_qt_client.wait(1)\n check_silent_output(capsys)\n\n\n# The following is a template for a script that will allow developers who see\n# a failure in CI to re-create the files that were generated in the\n# continuous integration easily.\nREPRODUCABILITY_SCRIPT = \"\"\"\n################################################################################\n# Export the images that were generated in the continuous integration for pywwt.\n# Just copy and paste all the code between here and '# End of script' into a\n# local file and run it with Python. You can then check if the differences\n# make sense, and if so, update the expected images.\n\nimport base64\n\nexpected = base64.b64decode('{expected}')\n\nwith open('expected.png', 'wb') as f:\n f.write(expected)\n\nactual = base64.b64decode('{actual}')\n\nwith open('actual.png', 'wb') as f:\n f.write(actual)\n\n# End of script\n################################################################################\n\"\"\"\n\n\ndef assert_widget_image(tmpdir, widget, filename):\n actual = tmpdir.join(filename).strpath\n widget.render(actual)\n framework = 'webengine' if WEBENGINE else 'webkit'\n if sys.platform.startswith('win') and not WEBENGINE:\n framework += '_win'\n elif sys.platform.startswith('darwin'):\n framework += '_osx'\n expected = os.path.join(DATA, framework, filename)\n try:\n msg = compare_images(expected, actual, tol=1.5)\n except Exception:\n msg = 'Image comparison failed:'\n print_exc()\n\n if msg is not None:\n\n from base64 import b64encode\n\n with open(expected, 'rb') as f:\n expected = b64encode(f.read()).decode()\n\n with open(actual, 'rb') as f:\n actual = b64encode(f.read()).decode()\n\n if os.environ.get('CI', 'false').lower() == 'true':\n print(REPRODUCABILITY_SCRIPT.format(actual=actual, expected=expected))\n\n pytest.fail(msg, pytrace=False)\n\n\ndef test_full(tmpdir, capsys, wwt_qt_client):\n\n # Test a whole session, with image comparison along the way.\n\n wwt = wwt_qt_client\n wwt.foreground_opacity = 1.\n\n # The crosshairs are currently broken on Mac/Linux but work on Windows.\n # For consistency, we turn it off here so that the results are the same\n # on all platforms.\n wwt.crosshairs = False\n\n wwt.wait(4)\n\n assert_widget_image(tmpdir, wwt, 'test_full_step0.png')\n\n gc = SkyCoord(0, 0, unit=('deg', 'deg'), frame='galactic')\n wwt.center_on_coordinates(gc, 60 * u.deg)\n\n wwt.wait(4)\n\n assert_widget_image(tmpdir, wwt, 'test_full_step1.png')\n\n wwt.constellation_boundary_color = 'red'\n wwt.constellation_figure_color = 'green'\n wwt.constellation_selection_color = 'blue'\n\n wwt.constellation_boundaries = True\n wwt.constellation_figures = True\n\n wwt.wait(4)\n\n assert_widget_image(tmpdir, wwt, 'test_full_step2.png')\n\n wwt.constellation_selection = True\n\n wwt.crosshairs = False\n wwt.ecliptic = True\n wwt.grid = True\n\n wwt.wait(4)\n\n assert_widget_image(tmpdir, wwt, 'test_full_step3.png')\n\n wwt.foreground = 'SFD Dust Map (Infrared)'\n\n wwt.wait(4)\n\n assert_widget_image(tmpdir, wwt, 'test_full_step4.png')\n\n 
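Editor's note: the REPRODUCABILITY_SCRIPT template earlier in this record embeds the expected and actual PNGs as base64 so a CI failure can be reproduced on a developer's machine. The encode/decode core of that round trip, shown as a standalone sketch (helper names are illustrative, not from the source):

import base64

def png_to_b64(path):
    # Read the image bytes and return them as an ASCII-safe base64 string,
    # as assert_widget_image does before printing the script.
    with open(path, 'rb') as f:
        return base64.b64encode(f.read()).decode()

def b64_to_png(encoded, path):
    # Inverse step, performed by the generated script to recreate the files.
    with open(path, 'wb') as f:
        f.write(base64.b64decode(encoded))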
wwt.foreground = \"Black Sky Background\"\n wwt.background = \"Black Sky Background\"\n wwt.foreground_opacity = 0\n\n wwt.center_on_coordinates(gc, 30 * u.deg)\n\n coord = SkyCoord(5, 0.5, unit=('deg', 'deg'), frame='galactic')\n\n circle1 = wwt.add_circle()\n circle1.set_center(coord)\n circle1.radius = 10 * u.pixel\n circle1.line_width = 5 * u.pixel\n circle1.line_color = 'green'\n circle1.fill = False\n circle1.opacity = 0.5\n\n coord = SkyCoord(-5, -0.5, unit=('deg', 'deg'), frame='galactic')\n\n circle2 = wwt.add_circle()\n circle2.set_center(coord)\n circle2.radius = 2 * u.degree\n circle2.line_width = 5 * u.pixel\n circle2.line_color = 'green'\n circle2.fill = True\n circle2.fill_color = 'orange'\n circle2.opacity = 1\n\n coord = SkyCoord([1, 4, 0], [0, 0, -5], unit=('deg', 'deg'), frame='galactic')\n\n poly = wwt.add_polygon()\n poly.add_point(coord[0])\n poly.add_point(coord[1])\n poly.add_point(coord[2])\n poly.fill = True\n poly.line_color = 'red'\n poly.fill_color = 'yellow'\n poly.line_width = 2 * u.pixel\n\n coord = SkyCoord([10, 5, 2], [5, 2, 2], unit=('deg', 'deg'), frame='galactic')\n\n polyline = wwt.add_line()\n polyline.add_point(coord[0])\n polyline.add_point(coord[1])\n polyline.add_point(coord[2])\n polyline.color = 'green'\n polyline.width = 3 * u.pixel\n\n wwt.wait(4)\n\n assert_widget_image(tmpdir, wwt, 'test_full_step5.png')\n", "sub_path": "pywwt/tests/test_qt_widget.py", "file_name": "test_qt_widget.py", "file_ext": "py", "file_size_in_byte": 5944, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "astropy.coordinates.SkyCoord.from_name", "line_number": 14, "usage_type": "call"}, {"api_name": "astropy.coordinates.SkyCoord", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 34, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 34, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 39, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 39, "usage_type": "name"}, {"api_name": "qtpy.QtWebEngineWidgets.WEBENGINE", "line_number": 82, "usage_type": "name"}, {"api_name": "sys.platform.startswith", "line_number": 83, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 83, "usage_type": "attribute"}, {"api_name": "qtpy.QtWebEngineWidgets.WEBENGINE", "line_number": 83, "usage_type": "name"}, {"api_name": "sys.platform.startswith", "line_number": 85, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "matplotlib.testing.compare.compare_images", "line_number": 89, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 92, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 99, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 102, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 104, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pytest.fail", 
"line_number": 107, "usage_type": "call"}, {"api_name": "astropy.coordinates.SkyCoord", "line_number": 126, "usage_type": "call"}, {"api_name": "astropy.units.deg", "line_number": 127, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 127, "usage_type": "name"}, {"api_name": "astropy.units.deg", "line_number": 164, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 164, "usage_type": "name"}, {"api_name": "astropy.coordinates.SkyCoord", "line_number": 166, "usage_type": "call"}, {"api_name": "astropy.units.pixel", "line_number": 170, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 170, "usage_type": "name"}, {"api_name": "astropy.units.pixel", "line_number": 171, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 171, "usage_type": "name"}, {"api_name": "astropy.coordinates.SkyCoord", "line_number": 176, "usage_type": "call"}, {"api_name": "astropy.units.degree", "line_number": 180, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 180, "usage_type": "name"}, {"api_name": "astropy.units.pixel", "line_number": 181, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 181, "usage_type": "name"}, {"api_name": "astropy.coordinates.SkyCoord", "line_number": 187, "usage_type": "call"}, {"api_name": "astropy.units.pixel", "line_number": 196, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 196, "usage_type": "name"}, {"api_name": "astropy.coordinates.SkyCoord", "line_number": 198, "usage_type": "call"}, {"api_name": "astropy.units.pixel", "line_number": 205, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 205, "usage_type": "name"}]} +{"seq_id": "278866280", "text": "# MNIST 손글씨 숫자 분류 신경망(Neural Network for MNIST Handwritten Digit Classification): 정확도 산출\nimport os\nimport sys\nimport numpy as np\nfrom pathlib import Path\ntry:\n sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))\n from mnist import init_network, load_mnist\n from common import sigmoid, softmax\nexcept ImportError:\n print('Library Module Can Not Found')\n\n# 1. 매개변수(w, b) 데이터 셋 가져오기\nnetwork = init_network()\n\nw1, w2, w3 = network['W1'], network['W2'], network['W3']\nb1, b2, b3 = network['b1'], network['b2'], network['b3']\n\n# 2. 학습/시험 데이터 가져오기\n(train_x, train_t), (test_x, test_t) = load_mnist(normalize=True, flatten=True, one_hot_label=False)\n\n# 3. 
정확도 산출\nhit = 0\nxlen = len(test_x)\nfor idx in range(xlen):\n x = test_x[idx]\n\n a1 = np.dot(x, w1) + b1\n z1 = sigmoid(a1)\n\n a2 = np.dot(z1, w2) + b2\n z2 = sigmoid(a2)\n\n a3 = np.dot(z2, w3) + b3\n y = softmax(a3)\n\n predict = np.argmax(y)\n t = test_t[idx]\n\n if predict == t:\n hit += 1\n\n# 정확도(Accuracy)\nprint(f'Accuracy: {hit/xlen}')\n\n\n\n\n\n", "sub_path": "02.neural-network/05.mnist-neural-network/ex04.py", "file_name": "ex04.py", "file_ext": "py", "file_size_in_byte": 1132, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 7, "usage_type": "call"}, {"api_name": "mnist.init_network", "line_number": 14, "usage_type": "call"}, {"api_name": "mnist.load_mnist", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 28, "usage_type": "call"}, {"api_name": "common.sigmoid", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 31, "usage_type": "call"}, {"api_name": "common.sigmoid", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 34, "usage_type": "call"}, {"api_name": "common.softmax", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "469022029", "text": "from selenium import webdriver\n\ndriver = webdriver.Chrome(\"C:/Users/jaeyun/Desktop/chromedriver_win32/chromedriver\")\ndriver.get(\"http://info.nec.go.kr/\")\ndriver.implicitly_wait(5)\ndriver.switch_to_default_content()\ndriver.switch_to_frame(\"main\")\ndriver.find_element_by_xpath(\"\"\"//*[@id=\"header\"]/ul[1]/li[2]/a\"\"\").click()\ndriver.implicitly_wait(5)\n\ndriver.find_element_by_xpath(\"\"\"//*[@id=\"presubmu\"]/li[4]/a\"\"\").click()\ndriver.implicitly_wait(5)\ndriver.find_element_by_xpath(\"\"\"//*[@id=\"header\"]/div[4]/ul/li[5]/a\"\"\").click()\ndriver.implicitly_wait(5)\ndriver.find_element_by_xpath(\"\"\"//*[@id=\"electionType1\"]\"\"\").click()\ndriver.implicitly_wait(5)\ndriver.find_element_by_xpath(\"\"\"//*[@id=\"electionName\"]/option[2]\"\"\").click()\ndriver.implicitly_wait(5)\ndriver.find_element_by_xpath(\"\"\"//*[@id=\"electionCode\"]/option[2]\"\"\").click()\ndriver.implicitly_wait(5)\nsido = driver.find_element_by_id(\"cityCode\")\ndriver.implicitly_wait(5)\nwhile True:\n sido_list = sido.find_elements_by_tag_name(\"option\")\n if len(sido_list) > 2:\n sido_name_values = [option.text for option in sido_list]\n sido_name_values = sido_name_values[2:]\n break\n\ndef get_num(tmp):\n if type(tmp) == type(0): return tmp\n else: return float(tmp.split('(')[0].replace(',',''))\n\ndef move_sido(name):\n element = driver.find_element_by_id(\"cityCode\")\n element.send_keys(name)\n driver.find_element_by_xpath(\"\"\"//*[@id=\"searchBtn\"]\"\"\").click()\n driver.implicitly_wait(5)\n\ndef append_data(df, sido_name, data):\n for each in df[0].values[1:]:\n data['광역시도'].append(sido_name)\n data['시군'].append(each[0])\n data['pop'].append(get_num(each[2]))\n data['moon'].append(get_num(each[3]))\n data['hong'].append(get_num(each[4]))\n data['ahn'].append(get_num(each[5]))\n 
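Editor's note on `get_num()` used throughout `append_data()` in this record: the scraped election-table cells arrive either as numbers or as strings like '1,234,567(45.6)', and the helper keeps only the absolute count. An equivalent standalone sketch with a worked example; `parse_vote_cell` is an illustrative name, not from the source.

def parse_vote_cell(cell):
    # Numeric cells pass through; string cells drop the '(percent)' suffix
    # and the thousands separators before conversion.
    if isinstance(cell, (int, float)):
        return float(cell)
    return float(str(cell).split('(')[0].replace(',', ''))

# e.g. parse_vote_cell('1,234,567(45.6)') == 1234567.0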
data['yu'].append(get_num(each[6]))\n data['sim'].append(get_num(each[7]))\n\n\nelection_result_raw = {'광역시도' : [],\n '시군' : [],\n 'pop' : [],\n 'moon' : [],\n 'hong' : [],\n 'ahn' : [],\n 'yu': [],\n 'sim': []}\n\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nfor each_sido in sido_name_values:\n move_sido(each_sido)\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n table = soup.find('table')\n df = pd.read_html(str(table))\n append_data(df, each_sido, election_result_raw)\n\nelection_result = pd.DataFrame(election_result_raw,columns=['광역시도', '시군', 'pop', 'moon','hong','ahn','yu','sim'])\nelection_result.to_csv(\"C:/Users/jaeyun/Desktop/github/data_analysis/6.Presidential Election/data/presidential.csv\")", "sub_path": "6.Presidential Election/code/PresidentialElection01.py", "file_name": "PresidentialElection01.py", "file_ext": "py", "file_size_in_byte": 2739, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 3, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 3, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "150505023", "text": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n# Created on 02/06/20 11:46 PM\r\n# @author: Gurusankar G\r\n\r\nfrom __future__ import division\r\n\r\nimport logging\r\nimport time\r\nfrom functools import reduce\r\n\r\nimport pyomo.environ as pyo\r\n\r\nfrom ta_lib.core.utils import get_package_path\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef get_pack_path():\r\n \"\"\"Absolute path for the package.\"\"\"\r\n return get_package_path().replace(\"\\\\\", \"/\").replace(\"src\", \"\")\r\n\r\n\r\ndef TPR_Conv(Model, P):\r\n \"\"\"To pick relevent cutoff value for the chosen TPR value.\"\"\"\r\n if Globals.TPR_Cutoff.iloc[P, 0] > 0:\r\n\r\n return Globals.TPR_Cutoff.iloc[P, 0]\r\n\r\n elif pyo.value(Model.TPR[P]) > Globals.TPR_Cutoff.iloc[P, 3]:\r\n\r\n return Globals.TPR_Cutoff.iloc[P, 2]\r\n else:\r\n\r\n return Globals.TPR_Cutoff.iloc[P, 1]\r\n\r\n\r\ndef Dollar_Sales_Stg1_Fn(Model):\r\n \"\"\"Define pyomo callback for calculating dollar sales.\r\n\r\n Parameters\r\n ----------\r\n Model : Pyomo Model Object created with the required variables\r\n Returns:\r\n --------\r\n Pyomo Expression, containing Dollar sales equation\r\n\r\n \"\"\"\r\n return sum(\r\n [\r\n Model.Weeks[P]\r\n * Retailer_Unit_Sales_Fn_Const1(Model, P)\r\n * Retailer_Price_Fn_Const1(Model, P)\r\n + (Globals.Tot_Week - Model.Weeks[P])\r\n * Retailer_Unit_Sales_Fn_Const2(Model, P)\r\n * Retailer_Price_Fn_Const2(Model, P)\r\n for P in Model.PPG_index\r\n ]\r\n )\r\n\r\n\r\ndef Retailer_Unit_Sales_Fn_Const1(Model, P):\r\n \"\"\"Define pyomo callback for calculating product unit sales.\r\n\r\n Parameters\r\n ----------\r\n Model : Pyomo Model Object created with the required variables\r\n P : Integer, PPG number to calculate EDLP Unit sales for all\r\n the weeks. 
Will be iteratively called by Model Objective function\r\n Returns:\r\n --------\r\n Pyomo Expression, containing product unit sales equation\r\n\r\n \"\"\"\r\n Self = pyo.exp(\r\n pyo.log(Model.EDLP[P] * Globals.Base_Price_stg1[P]) * Globals.EDLP_Coef[P][P]\r\n + Globals.Intercepts_stg1[P]\r\n )\r\n Comp = Competitor_Unit_Effect_Stg1_Fn(Model, P)\r\n Unit_Sales = Self * Comp\r\n return Unit_Sales\r\n\r\n\r\ndef Retailer_Unit_Sales_Fn_Const2(Model, P):\r\n \"\"\"Define function for calculating product unit sales.\r\n\r\n Parameters\r\n ----------\r\n Model : Pyomo Model Object created with the required variables\r\n P : Integer, PPG number to calculate TPR Unit sales\r\n for all the weeks. Will be iteratively called by Model\r\n Objective function\r\n Returns:\r\n --------\r\n Pyomo Expression, containing product unit sales equation\r\n\r\n \"\"\"\r\n Self = pyo.exp(\r\n pyo.log(Globals.Base_Price_stg1[P]) * Globals.EDLP_Coef[P][P]\r\n + Model.TPR[P] * TPR_Conv(Model, P)\r\n + Globals.Intercepts_stg1[P]\r\n )\r\n Comp = Competitor_Unit_Effect_Stg1_Fn(Model, P)\r\n Unit_Sales = Self * Comp\r\n return Unit_Sales\r\n\r\n\r\ndef Retailer_Price_Fn_Const1(Model, P):\r\n \"\"\"Define pyomo callback for calculating product unit price.\r\n\r\n Parameters\r\n ----------\r\n Model : Pyomo Model Object created with the required variables\r\n P : Integer, PPG number to calculate EDLP Retailer Price for all the weeks.\r\n Will be iteratively called by Model Objective function\r\n Returns:\r\n --------\r\n Pyomo Expression, containing product Retailer Price equation\r\n\r\n \"\"\"\r\n Price = Model.EDLP[P] * Globals.Base_Price_stg1[P]\r\n return Price\r\n\r\n\r\ndef Retailer_Price_Fn_Const2(Model, P):\r\n \"\"\"Define function for calculating product unit price.\r\n\r\n Parameters\r\n ----------\r\n Model : Pyomo Model Object created with the required variables\r\n P : Integer, PPG number to calculate TPR Retailer Price for all the weeks.\r\n Will be iteratively called by Model Objective function\r\n Returns:\r\n --------\r\n Pyomo Expression, containing product Retailer Price equation\r\n\r\n \"\"\"\r\n Price = Globals.Base_Price_stg1[P] * (1 - Model.TPR[P] / 100)\r\n return Price\r\n\r\n\r\ndef Competitor_Unit_Effect_Stg1_Fn(Model, Cur_Ret):\r\n \"\"\"Define pyomo callback for calculating competitor effect.\r\n\r\n Parameters\r\n ----------\r\n Model : Pyomo Model Object created with the required variables\r\n Cur_Ret : Integer, PPG number to calculate EDLP Competitor Effect for all the weeks.\r\n Will be iteratively called by Model Objective function\r\n Returns:\r\n --------\r\n Pyomo Expression, containing Competitor Unit Effect equation\r\n\r\n \"\"\"\r\n\r\n return 1\r\n\r\n\r\n# ########################### Trade Spend Constraints #############################\r\ndef Total_Trade_Spent_Bnd_Stg1_Fn(Model, Cur_Ret):\r\n \"\"\"Define pyomo callback for calculating bound for total trade spent.\r\n\r\n Parameters\r\n ----------\r\n Model : Pyomo Model Object created with the required variables\r\n Cur_Ret : Integer, PPG number to calculate Total Trade Spent for all the weeks.\r\n Will be iteratively called by Model Objective function\r\n Returns:\r\n --------\r\n Pyomo Expression, containing product Total Trade Spent equation\r\n\r\n \"\"\"\r\n Val = Model.Weeks[Cur_Ret] * (\r\n Globals.Base_Price_stg1[Cur_Ret] - Retailer_Price_Fn_Const1(Model, Cur_Ret)\r\n ) * Retailer_Unit_Sales_Fn_Const1(Model, Cur_Ret) + (\r\n Globals.Tot_Week - Model.Weeks[Cur_Ret]\r\n ) * (\r\n Globals.Base_Price_stg1[Cur_Ret] - 
Retailer_Price_Fn_Const2(Model, Cur_Ret)\r\n ) * Retailer_Unit_Sales_Fn_Const2(\r\n Model, Cur_Ret\r\n )\r\n return pyo.inequality(\r\n Globals.Target_Trade_Spend[Cur_Ret] * (1 - Globals.Ov_Perc_Limit / 100),\r\n Val,\r\n Globals.Target_Trade_Spend[Cur_Ret] * (1 + Globals.Ov_Perc_Limit / 100),\r\n )\r\n\r\n\r\ndef EDLP_Trade_Spent_Bnd_Stg1_Fn(Model, Cur_Ret):\r\n \"\"\"Define pyomo callback for calculating bound for EDLP trade spent.\r\n\r\n Parameters\r\n ----------\r\n Model : Pyomo Model Object created with the required variables\r\n Cur_Ret : Integer, PPG number to calculate EDLP Trade Spent for all the weeks.\r\n Will be iteratively called by Model Objective function\r\n Returns:\r\n --------\r\n Pyomo Expression, containing product EDLP Trade Spent equation\r\n\r\n \"\"\"\r\n Val = (\r\n Model.Weeks[Cur_Ret]\r\n * (Globals.Base_Price_stg1[Cur_Ret] - Retailer_Price_Fn_Const1(Model, Cur_Ret))\r\n * Retailer_Unit_Sales_Fn_Const1(Model, Cur_Ret)\r\n )\r\n\r\n return pyo.inequality(\r\n Globals.Target_EDLP_Spend[Cur_Ret] * (1 - Globals.EDLP_Perc_Limit / 100),\r\n Val,\r\n Globals.Target_EDLP_Spend[Cur_Ret] * (1 + Globals.EDLP_Perc_Limit / 100),\r\n )\r\n\r\n\r\ndef TPR_Spend_calc_edge_case(Model, P):\r\n \"\"\"Define pyomo callback for calculating bound for EDLP trade spent.\r\n\r\n Parameters\r\n ----------\r\n Model : Pyomo Model Object created with the required variables\r\n P : Integer, PPG number to calculate TPR Trade Spent for all the weeks.\r\n Will be iteratively called by Model Objective function\r\n Returns:\r\n --------\r\n Pyomo Expression, containing product TPR Trade Spent equation\r\n\r\n \"\"\"\r\n Self = pyo.exp(\r\n pyo.log(Globals.Base_Price_stg1[P]) * Globals.EDLP_Coef[P][P]\r\n + Globals.TPR_LB * TPR_Conv(Model, P)\r\n + Globals.Intercepts_stg1[P]\r\n )\r\n Comp_Retailers_Unit_Sales = [\r\n pyo.exp((pyo.log(Globals.Base_Price_stg1[i]) * Globals.EDLP_Coef[P][i]))\r\n for i in Model.PPG_index\r\n if i != P\r\n ]\r\n Comp = reduce(lambda x, y: x * y, Comp_Retailers_Unit_Sales, 1)\r\n Unit_Sales = Self * Comp\r\n Price = Globals.Base_Price_stg1[P] * (1 - Globals.TPR_LB)\r\n\r\n TPR_LB_Spend = (Globals.Base_Price_stg1[P] - Price) * Unit_Sales\r\n\r\n TPR_Spend_Buffer = Globals.Target_Trade_Spend[P] * (\r\n 1 + Globals.Ov_Perc_Limit / 100\r\n ) - Globals.Target_EDLP_Spend[P] * (1 - Globals.EDLP_Perc_Limit / 100)\r\n\r\n if TPR_LB_Spend <= TPR_Spend_Buffer:\r\n Buffer_TPR_Events = int(TPR_Spend_Buffer / TPR_LB_Spend)\r\n return (Buffer_TPR_Events, TPR_Spend_Buffer)\r\n else:\r\n return 0, 0\r\n\r\n\r\ndef TPR_Trade_Spent_Bnd_Stg1_Fn(Model, Cur_Ret):\r\n \"\"\"Define pyomo callback for calculating bound for TPR trade spent.\r\n\r\n Parameters\r\n ----------\r\n Model : Pyomo Model Object created with the required variables\r\n Cur_Ret : Integer, PPG number to calculate TPR Trade Spent for all the weeks.\r\n Will be iteratively called by Model Objective function\r\n Returns:\r\n --------\r\n Pyomo Expression, containing product TPR Trade Spent equation\r\n\r\n \"\"\"\r\n\r\n Val = (\r\n (Globals.Tot_Week - Model.Weeks[Cur_Ret])\r\n * Retailer_Unit_Sales_Fn_Const2(Model, Cur_Ret)\r\n * Globals.TE_Val[Cur_Ret][1]\r\n )\r\n\r\n LHS = Globals.Target_TPR_Spend[Cur_Ret] * (1 - Globals.TPR_Perc_Limit / 100)\r\n RHS = Globals.Target_TPR_Spend[Cur_Ret] * (1 + Globals.TPR_Perc_Limit / 100)\r\n if RHS == 0:\r\n Max_Events, UB_TPR_Spend = TPR_Spend_calc_edge_case(Model, Cur_Ret)\r\n if UB_TPR_Spend:\r\n RHS = UB_TPR_Spend\r\n return pyo.inequality(LHS, Val, RHS)\r\n\r\n\r\ndef 
Overall_Total_Trade_Spent_Stg1_Fn(Model):\r\n \"\"\"To Establish constraint on overall trade spend for pyomo model.\r\n\r\n Parameters\r\n ----------\r\n Model : Pyomo Model Object created with the required variables\r\n Returns:\r\n --------\r\n Pyomo Expression, containing OVerall Trade equation\r\n\r\n \"\"\"\r\n return sum(\r\n [\r\n (\r\n Model.Weeks[Cur_Ret]\r\n * (\r\n Globals.Base_Price_stg1[Cur_Ret]\r\n - Retailer_Price_Fn_Const1(Model, Cur_Ret)\r\n )\r\n * Retailer_Unit_Sales_Fn_Const1(Model, Cur_Ret)\r\n + (Globals.Tot_Week - Model.Weeks[Cur_Ret])\r\n * Retailer_Unit_Sales_Fn_Const2(Model, Cur_Ret)\r\n * Globals.TE_Val[Cur_Ret][1]\r\n )\r\n for Cur_Ret in range(Globals.Tot_Prod)\r\n ]\r\n ) <= sum(Globals.Target_Trade_Spend) * (\r\n 1 + Globals.Retailer_Overall_Sales_Buffer / 100\r\n )\r\n\r\n\r\ndef Create_Model_Stg1():\r\n \"\"\"Create Pyomo model.\r\n\r\n Model consists of\r\n 1. Functions to properly initialize number of weaks and respective bound from global data loader class\r\n 2. Variables to optimize\r\n 3. Objective function\r\n 4. Constraints\r\n\r\n \"\"\"\r\n\r\n def Weeks_Init(Model, PPG_index):\r\n return Globals.EDLP_Events[PPG_index]\r\n\r\n def Weeks_bound(Model, PPG_index):\r\n return (Globals.Min_EDLP_Events[PPG_index], Globals.Max_EDLP_Events[PPG_index])\r\n\r\n def TPR_Bound_Init(Model, P):\r\n\r\n LB = min(Globals.TPR_Perc_Val[P])\r\n UB = max(Globals.TPR_Perc_Val[P])\r\n\r\n return (LB, UB)\r\n\r\n def TPR_Initial(Model, P):\r\n\r\n UB = max(Globals.TPR_Perc_Val[P])\r\n\r\n return UB\r\n\r\n Model = pyo.ConcreteModel(name=\"Spend_Optim\")\r\n Model.PPGs = pyo.Param(initialize=Globals.Tot_Prod, domain=pyo.PositiveIntegers)\r\n Model.PPG_index = pyo.RangeSet(0, Model.PPGs - 1)\r\n Model.EDLP = pyo.Var(\r\n Model.PPG_index,\r\n initialize=Globals.EDLP_UB,\r\n domain=pyo.NonNegativeReals,\r\n bounds=(Globals.EDLP_LB, Globals.EDLP_UB),\r\n )\r\n Model.TPR = pyo.Var(\r\n Model.PPG_index,\r\n initialize=TPR_Initial,\r\n domain=pyo.NonNegativeReals,\r\n bounds=TPR_Bound_Init,\r\n )\r\n Model.Weeks = pyo.Var(\r\n Model.PPG_index,\r\n initialize=Weeks_Init,\r\n domain=pyo.PositiveIntegers,\r\n bounds=Weeks_bound,\r\n )\r\n Model.Obj = pyo.Objective(rule=Dollar_Sales_Stg1_Fn, sense=pyo.maximize)\r\n # Model.Tot_Spent_Bnd = pyo.Constraint(Model.PPG_index, rule=Total_Trade_Spent_Bnd_Stg1_Fn)\r\n Model.EDLP_Bnd = pyo.Constraint(Model.PPG_index, rule=EDLP_Trade_Spent_Bnd_Stg1_Fn)\r\n Model.TPR_Bnd = pyo.Constraint(Model.PPG_index, rule=TPR_Trade_Spent_Bnd_Stg1_Fn)\r\n Model.Overall = pyo.Constraint(rule=Overall_Total_Trade_Spent_Stg1_Fn)\r\n return Model\r\n\r\n\r\ndef Call_Solver_Stg1(PPGs, Wks, name=\"bonmin\", obj=1):\r\n \"\"\"To Generate and call a pyomo model.\r\n\r\n The function generates a generic model which can be ran across solvers\r\n which are supported in the Pyomo framework and can solve MINLP problems\r\n ex-Bonmin, Baron.\r\n\r\n Parameters\r\n ----------\r\n PPG : Integer, Number of Total Products to be passed\r\n Wks : Integer, Number of Total Weeks to be passed\r\n name : String, (Optional) Name of the solver to be used\r\n obj : Initial value of the Objective (Optional) to keep running the solver till the maximum value is reached\r\n Returns:\r\n --------\r\n Pyomo Model, containing Model Object with required variables\r\n\r\n \"\"\"\r\n Model = Create_Model_Stg1()\r\n\r\n path = get_pack_path() + \"scripts/tpo_discrete/bonmin-win32/bonmin.exe\" # noqa\r\n Opt = pyo.SolverFactory(name, executable=path)\r\n start_time = time.time()\r\n try:\r\n 
Result = Opt.solve(Model)\r\n if str(Result.solver.status) != \"ok\":\r\n raise Exception(\"Solver Status should be OK\")\r\n if str(Result.solver.termination_condition) != \"optimal\":\r\n raise Exception(\"Terminal Condition should be Optimal\")\r\n Globals.Stage1Success = True\r\n except Exception as e:\r\n logger.error(\"\\nError in Solving problem\" + str(e))\r\n end_time = time.time()\r\n Call_Solver_Stg1.EDLP_Val = [pyo.value(Model.EDLP[i]) for i in range(PPGs)]\r\n Call_Solver_Stg1.TPR_Val = [pyo.value(Model.TPR[i]) for i in range(PPGs)]\r\n Call_Solver_Stg1.Num_Events = [pyo.value(Model.Weeks[i]) for i in range(PPGs)]\r\n Call_Solver_Stg1.EDLP_Spend_Values = [\r\n pyo.value(\r\n (Model.Weeks[Cur_Ret])\r\n * (\r\n Globals.Base_Price_stg1[Cur_Ret]\r\n - Retailer_Price_Fn_Const1(Model, Cur_Ret)\r\n )\r\n * Retailer_Unit_Sales_Fn_Const1(Model, Cur_Ret)\r\n )\r\n for Cur_Ret in range(Globals.Tot_Prod)\r\n ]\r\n Call_Solver_Stg1.TPR_Spend_Values = [\r\n pyo.value(\r\n (Globals.Tot_Week - Model.Weeks[Cur_Ret])\r\n * (\r\n Globals.Base_Price_stg1[Cur_Ret]\r\n - Retailer_Price_Fn_Const2(Model, Cur_Ret)\r\n )\r\n * Retailer_Unit_Sales_Fn_Const2(Model, Cur_Ret)\r\n )\r\n for Cur_Ret in range(Globals.Tot_Prod)\r\n ]\r\n Call_Solver_Stg1.Tot_Spend_Values = list(\r\n map(\r\n lambda x, y: x + y,\r\n Call_Solver_Stg1.EDLP_Spend_Values,\r\n Call_Solver_Stg1.TPR_Spend_Values,\r\n )\r\n )\r\n if Globals.Stage1Success and pyo.value(Model.Obj) > obj:\r\n logger.info(\r\n \"\\n\\n Optimizer First Stage Results:\\n##############################\\n\\n\"\r\n )\r\n el_time = \" The Elapsed Time is --- %s Seconds ---\" % (end_time - start_time)\r\n btime = f\"{name} Solver Execution Time: {Result['Solver'][0]['Time']}\"\r\n logger.info(\r\n f\"Elapsed_Time: {el_time}\\n\\nBonmin Time: {btime}\\n\\nObjective_Value: {pyo.value(Model.Obj)}\"\r\n )\r\n logger.info(\r\n f\"Message: {Result['Solver'][0]['Message']},Termination: {Result['Solver'][0]['Termination condition']},Status: {Result['Solver'][0]['Status']},Objective_Value: {pyo.value(Model.Obj)}\",\r\n )\r\n Call_Solver_Stg1.Obj_Val = pyo.value(Model.Obj)\r\n Globals.Stage1Success = True\r\n else:\r\n Call_Solver_Stg1.Obj_Val = obj\r\n Globals.Stage1Success = False\r\n\r\n\r\ndef Stage1_caller(global_vars):\r\n \"\"\"Task of the function is to get the initialized global data class.\r\n\r\n use the solver and load results back to the global data class thereby\r\n handing over control to second stage\r\n Parameters\r\n ----------\r\n global_vars : Object, Globally defined variables is being passed\r\n\r\n \"\"\"\r\n global Globals\r\n Globals = global_vars\r\n\r\n logger.info(f\"\\n\\nSolving First Stage : Ret {Globals.Ret} Cat {Globals.Cat}\")\r\n Call_Solver_Stg1(PPGs=Globals.Tot_Prod, Wks=Globals.Tot_Week, name=\"bonmin\", obj=1)\r\n Globals.stage1_EDLP_Init_PPG = Call_Solver_Stg1.EDLP_Val\r\n Globals.stage1_TPR_Init_PPG = Call_Solver_Stg1.TPR_Val\r\n Globals.stage1_Num_Events_PPG = Call_Solver_Stg1.Num_Events\r\n Globals.stage1_EDLP_Spend_Values = Call_Solver_Stg1.EDLP_Spend_Values\r\n Globals.stage1_TPR_Spend_Values = Call_Solver_Stg1.TPR_Spend_Values\r\n Globals.stage1_Tot_Spend_Values = Call_Solver_Stg1.Tot_Spend_Values\r\n\r\n\r\nif __name__ == \"__main__\":\r\n Globals.init(1, 1)\r\n Stage1_caller()\r\n", "sub_path": "stage_1_tpo_optimizer.py", "file_name": "stage_1_tpo_optimizer.py", "file_ext": "py", "file_size_in_byte": 16451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": 
[{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "ta_lib.core.utils.get_package_path", "line_number": 21, "usage_type": "call"}, {"api_name": "pyomo.environ.value", "line_number": 30, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 30, "usage_type": "name"}, {"api_name": "pyomo.environ.exp", "line_number": 75, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 75, "usage_type": "name"}, {"api_name": "pyomo.environ.log", "line_number": 76, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 76, "usage_type": "name"}, {"api_name": "pyomo.environ.exp", "line_number": 98, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 98, "usage_type": "name"}, {"api_name": "pyomo.environ.log", "line_number": 99, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 99, "usage_type": "name"}, {"api_name": "pyomo.environ.inequality", "line_number": 182, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 182, "usage_type": "name"}, {"api_name": "pyomo.environ.inequality", "line_number": 208, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 208, "usage_type": "name"}, {"api_name": "pyomo.environ.exp", "line_number": 228, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 228, "usage_type": "name"}, {"api_name": "pyomo.environ.log", "line_number": 229, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 229, "usage_type": "name"}, {"api_name": "pyomo.environ.exp", "line_number": 234, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 234, "usage_type": "name"}, {"api_name": "pyomo.environ.log", "line_number": 234, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 238, "usage_type": "call"}, {"api_name": "pyomo.environ.inequality", "line_number": 281, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 281, "usage_type": "name"}, {"api_name": "pyomo.environ.ConcreteModel", "line_number": 345, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 345, "usage_type": "name"}, {"api_name": "pyomo.environ.Param", "line_number": 346, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 346, "usage_type": "name"}, {"api_name": "pyomo.environ.PositiveIntegers", "line_number": 346, "usage_type": "attribute"}, {"api_name": "pyomo.environ.RangeSet", "line_number": 347, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 347, "usage_type": "name"}, {"api_name": "pyomo.environ.Var", "line_number": 348, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 348, "usage_type": "name"}, {"api_name": "pyomo.environ.NonNegativeReals", "line_number": 351, "usage_type": "attribute"}, {"api_name": "pyomo.environ", "line_number": 351, "usage_type": "name"}, {"api_name": "pyomo.environ.Var", "line_number": 354, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 354, "usage_type": "name"}, {"api_name": "pyomo.environ.NonNegativeReals", "line_number": 357, "usage_type": "attribute"}, {"api_name": "pyomo.environ", "line_number": 357, "usage_type": "name"}, {"api_name": "pyomo.environ.Var", "line_number": 360, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 360, "usage_type": "name"}, {"api_name": "pyomo.environ.PositiveIntegers", "line_number": 363, "usage_type": "attribute"}, {"api_name": "pyomo.environ", "line_number": 363, "usage_type": "name"}, {"api_name": "pyomo.environ.Objective", 
"line_number": 366, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 366, "usage_type": "name"}, {"api_name": "pyomo.environ.maximize", "line_number": 366, "usage_type": "attribute"}, {"api_name": "pyomo.environ.Constraint", "line_number": 368, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 368, "usage_type": "name"}, {"api_name": "pyomo.environ.Constraint", "line_number": 369, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 369, "usage_type": "name"}, {"api_name": "pyomo.environ.Constraint", "line_number": 370, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 370, "usage_type": "name"}, {"api_name": "pyomo.environ.SolverFactory", "line_number": 395, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 395, "usage_type": "name"}, {"api_name": "time.time", "line_number": 396, "usage_type": "call"}, {"api_name": "time.time", "line_number": 406, "usage_type": "call"}, {"api_name": "pyomo.environ.value", "line_number": 407, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 407, "usage_type": "name"}, {"api_name": "pyomo.environ.value", "line_number": 408, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 408, "usage_type": "name"}, {"api_name": "pyomo.environ.value", "line_number": 409, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 409, "usage_type": "name"}, {"api_name": "pyomo.environ.value", "line_number": 411, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 411, "usage_type": "name"}, {"api_name": "pyomo.environ.value", "line_number": 422, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 422, "usage_type": "name"}, {"api_name": "pyomo.environ.value", "line_number": 439, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 439, "usage_type": "name"}, {"api_name": "pyomo.environ.value", "line_number": 446, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 446, "usage_type": "name"}, {"api_name": "pyomo.environ.value", "line_number": 449, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 449, "usage_type": "name"}, {"api_name": "pyomo.environ.value", "line_number": 451, "usage_type": "call"}, {"api_name": "pyomo.environ", "line_number": 451, "usage_type": "name"}]} +{"seq_id": "315635258", "text": "#!/usr/bin/env python3\n# Course: CSCI 6509\n# Author: Nirav Jadeja\n# Description: Solution a3q6.py\n# Python Version: 3.7\n\nimport sys\nfrom nltk.util import ngrams\n\n\nclass DataProcessing:\n\n def __init__(self, ngramSize, profileLength, inputFileName, outputFileName):\n self.ngramSize = ngramSize\n self.profileLength = profileLength\n self.inputFileName = inputFileName\n self.outputFileName = outputFileName\n\n def preprocessData(self):\n with open(self.inputFileName) as file:\n fileLine=\" \".join(line.strip() for line in file)\n\n fileLineLength = len(fileLine) + 1\n tempFileLine = (fileLine.rjust(fileLineLength)).ljust(fileLineLength + 1)\n data = tempFileLine.replace(r\" \", \"_\")\n return list(ngrams(data, self.ngramSize))\n\n def fileWrite(self, inputDict):\n count = 0\n tempDict = {}\n file=open(self.outputFileName + \".ngrams\", \"w+\")\n for key,value in sorted(inputDict.items(), key=lambda x:(-x[1],x[0])):\n if count < self.profileLength:\n tempDict[key] = value\n file.write(\"%s %.16f\\n\" %(key, value))\n count += 1\n file.close()\n return tempDict\n\n def findTotalCounts(self, grams):\n tempString, tempList, frequencyCounter = \"\", [], 
{}\n for i in range(0, len(grams)):\n for j in range(0, len(grams[i])):\n tempString = tempString + grams[i][j]\n tempList.append(tempString)\n tempString = \"\"\n\n totalSum = len(tempList)\n for i in tempList:\n frequencyCounter[i] = tempList.count(i)\n return frequencyCounter, totalSum\n\n def findKeyValues(self, frequencyCounter, totalSum):\n floatValueList = []\n for key, value in frequencyCounter.items():\n frequencyCounter[key] = (value / totalSum)\n floatValueList.append(frequencyCounter[key])\n return floatValueList\n\n\nclass IDataProcessing():\n\n def mainMethod(ngramSize, profileLength, inputFileName, outputFileName):\n dataProcessing = DataProcessing(ngramSize, profileLength, inputFileName, outputFileName)\n grams = dataProcessing.preprocessData()\n frequencyCounter, totalSum = dataProcessing.findTotalCounts(grams)\n floatValueList = dataProcessing.findKeyValues(frequencyCounter, totalSum)\n dictValues = dataProcessing.fileWrite(frequencyCounter)\n return dictValues\n\n\nclass DataOperations:\n\n def __init__(self, dictA, dictB):\n self.dictA = dictA\n self.dictB = dictB\n\n def unionOfLists(self):\n result = list(set(self.dictA) | set(self.dictB))\n return result\n\n def compareValues(self, sortedValues):\n tempList = []\n for value in range(0, len(sortedValues)):\n if sortedValues[value] in self.dictA:\n if sortedValues[value] in self.dictB:\n tempList.append([self.dictA[sortedValues[value]], self.dictB[sortedValues[value]]])\n else:\n tempList.append([self.dictA[sortedValues[value]], 0])\n else:\n tempList.append([self.dictB[sortedValues[value]], 0])\n return tempList\n\n def calculateValues(self, comparedValues):\n tempList = []\n for i in range (0, len(comparedValues)):\n x = (2*((comparedValues[i][0] - comparedValues[i][1]) / (comparedValues[i][0] + comparedValues[i][1]))) ** 2\n tempList.append(x)\n return tempList\n\n\nclass IDataOperations:\n\n def mainMethod(dictA, dictB):\n dataOperaions = DataOperations(dictA, dictB)\n sortedValues = sorted(dataOperaions.unionOfLists())\n comparedValues = dataOperaions.compareValues(sortedValues)\n calculatedValues = dataOperaions.calculateValues(comparedValues)\n return calculatedValues\n\n\nclass MainClass:\n\n def exceptionMthod():\n try:\n arguments = sys.argv[1:]\n ngramSize = int(arguments[0])\n profileLength = int(arguments[1])\n inputFileA = str(arguments[2])\n inputFileB = str(arguments[3])\n\n MainClass.subMethod(ngramSize, profileLength, inputFileA, inputFileB)\n\n except Exception as error:\n ngramSize = int(input(\"enter ngram size: \"))\n profileLength = int(input(\"enter profile length: \"))\n inputFileA = str(input(\"Enter file A (e.g: a.txt): \"))\n inputFileB = str(input(\"Enter file B (e.g: b.txt): \"))\n\n MainClass.subMethod(ngramSize, profileLength, inputFileA, inputFileB)\n\n def subMethod(ngramSize, profileLength, inputFileA, inputFileB):\n dictA = IDataProcessing.mainMethod(ngramSize, profileLength, inputFileA, \"a3q6-test1a\")\n dictB = IDataProcessing.mainMethod(ngramSize, profileLength, inputFileB, \"a3q6-test1b\")\n calculatedValues = IDataOperations.mainMethod(dictA, dictB)\n print(\"CNG for %s %s a3q6-test1a a3q6-test1b: %.13f\" %( ngramSize, profileLength, sum(calculatedValues)))\n\nif __name__ == \"__main__\":\n\n MainClass.exceptionMthod()\n", "sub_path": "a3/a3q6.py", "file_name": "a3q6.py", "file_ext": "py", "file_size_in_byte": 5083, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "nltk.util.ngrams", "line_number": 26, 
"usage_type": "call"}, {"api_name": "sys.argv", "line_number": 116, "usage_type": "attribute"}]} +{"seq_id": "425581226", "text": "#coding=utf-8\nimport web\nurls=(\n'/','index',\n'/movies/(\\d+)','movies', #访问/movies/(d+),将调用movies.html\n\t )\nrender = web.template.render(\"templates/\")\ndb = web.database(dbn='sqlite',db='MovieSite.db')\nclass index:\n\tdef GET(self):\n\t\tmovies = db.select('movies')\n\t\treturn render.index(movies)\n\tdef POST(self):\n\t\tdata = web.input()\n\t\tcondition = r'title like \"%' + data.title + r'%\"'\n\t\tmovies = db.select('movies', where=condition)\n\t\treturn render.index(movies)\n\nclass movies:\n\tdef GET(self,movie_id):\n\t\tmovie_id = int(movie_id)\n\t\tmovie = db.select('movies', where='id=$movie_id', vars=locals())[0]\n\t\treturn render.movies(movie)\n\nif __name__ == \"__main__\":\n\tapp = web.application(urls,globals())\n\tapp.run()\n", "sub_path": "code.py", "file_name": "code.py", "file_ext": "py", "file_size_in_byte": 713, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "web.template.render", "line_number": 7, "usage_type": "call"}, {"api_name": "web.template", "line_number": 7, "usage_type": "attribute"}, {"api_name": "web.database", "line_number": 8, "usage_type": "call"}, {"api_name": "web.input", "line_number": 14, "usage_type": "call"}, {"api_name": "web.application", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "145018213", "text": "import tensorflow as tf \nimport matplotlib.pyplot as plt \n\ndef load_data(show=False):\n mnist = tf.keras.datasets.mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n # Normalizing the data \n x_train = x_train/255\n x_test = x_test/255\n\n if show:\n # Showing the data \n plt.figure(figsize=(10, 10))\n for i in range(10):\n plt.subplot(4, 3, i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(x_train[i])\n plt.xlabel(y_train[i])\n plt.show()\n\n # Covolution layer need's 3 dimension to procedure\n x_train = x_train[:,:,:,tf.newaxis]\n x_test = x_test[:,:,:,tf.newaxis]\n\n # print(x_test.shape)\n\n return x_train, y_train, x_test, y_test \n\n\nload_data()", "sub_path": "Mnist OpenCV/loadData.py", "file_name": "loadData.py", "file_ext": "py", "file_size_in_byte": 796, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "tensorflow.keras", "line_number": 5, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 21, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "tensorflow.newaxis", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.newaxis", "line_number": 26, "usage_type": "attribute"}]} +{"seq_id": "408615568", "text": "import scipy.io as sio\nimport numpy as np\n\n\ndef loadData(path, transpose=True):\n data = sio.loadmat(path)\n y = data.get('y')\n y = y.reshape(y.shape[0])\n X = data.get('X')\n\n if transpose:\n X = np.array([im.reshape((20, 20)).T for im in X])\n X = np.array([im.reshape(400) for im in X])\n\n return X, y\n", "sub_path": "ex4 NN back propagation/loadData.py", "file_name": "loadData.py", "file_ext": "py", "file_size_in_byte": 330, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "scipy.io.loadmat", "line_number": 6, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 6, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "325260252", "text": "import sqlite3\nimport datetime\nimport random as rd\n\ndb_name = 'db.sqlite3' \n\n\ndef run_query(query, parameters=(), database=db_name):\n with sqlite3.connect(database) as conn:\n cursor = conn.cursor()\n result = cursor.execute(query, parameters)\n conn.commit()\n\n return result\n\nfields = [\n 'password',\n 'username',\n 'first_name',\n 'last_name',\n 'email',\n 'is_staff',\n 'dni',\n 'nacimiento',\n 'donante',\n 'is_superuser',\n 'is_active',\n 'date_joined',\n\n]\n\n#-----------------> UN REGISTRO EJEMPLO <----------------\n\"\"\" \nparameters = (\n '123456789',\n 'danielito1',\n 'daniel',\n 'benitez',\n 'daniel@benitez.com',\n 0,\n 123456789,\n random_date(), \n 1,\n 0,\n 0,\n datetime.datetime.now(),\n)\n \"\"\"\n\n#<-------------------------------------------------------->\n\n#--------------RANDOM DATES-----------------\ndef random_date():\n start_date = datetime.date(1940, 1, 1)\n end_date = datetime.date(2002, 2, 1)\n time_between_dates = end_date - start_date\n days_between_dates = time_between_dates.days\n random_number_of_days = rd.randrange(days_between_dates)\n random_date = start_date + datetime.timedelta(days=random_number_of_days)\n return random_date\n\n#----------------------------------------------------\n\n\n\nfirst_names = [\n 'Maria',\n 'Ramon',\n 'Ezequiel',\n 'Lucas',\n 'Gabriel',\n 'Martin',\n 'Jose',\n 'Pedro',\n 'Estefania',\n 'Soledad',\n 'Ivan',\n 'Mateo',\n 'Tomas',\n 'Daniel',\n 'Vanina',\n 'Rolando',\n 'Carlos',\n 'Agustin',\n 'Simon',\n 'Pablo',\n 'Julieta',\n\n]\n\nlast_names = [\n 'Ibalo',\n 'Carabajal',\n 'Andino',\n 'Benitez',\n 'Baez',\n 'Martinez',\n 'Chuconuk',\n 'Gonzalez',\n 'Campestrini',\n 'Parera',\n 'Perez',\n 'Cuneo',\n 'Comechingon',\n 'Venegas',\n 'Parra',\n]\n\nN = 5 #Record number\nrecords = list() #Record Collection\nstr_fields = ', '.join(fields)\nn_fields = ', '.join(['?' 
for _ in fields])\ntable = 'user_usuario'\nadd_query = 'INSERT INTO %s (%s) VALUES (%s)' % (table, str_fields, n_fields)\n\nfor _ in range(N):\n name = rd.choice(first_names)\n l_name = rd.choice(last_names)\n\n parameters = [ \n '123456789', #password\n f'{name}_{l_name}{rd.randint(1,100)}', #username\n name, #first name\n l_name, #last_name\n f'{name}_{rd.randint(1,100)}@{l_name}.com', #email\n 0, #is staff\n rd.randint(10000000, 45000000), #dni\n random_date(), #born_date\n #donante#is_active\n 0, #is_superuser \n 0, #is_active \n datetime.datetime.now() #date_joined\n ]\n run_query(add_query, parameters)\n", "sub_path": "fillingthebase.py", "file_name": "fillingthebase.py", "file_ext": "py", "file_size_in_byte": 3124, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sqlite3.connect", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 55, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 59, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 117, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 118, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 122, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 125, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 127, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "attribute"}]} +{"seq_id": "432845725", "text": "ITEM_PER_PAGE = 8\n\nfrom searcher import Searcher\nimport copy\nimport json\nfrom flask import Flask, redirect, render_template, request, url_for, jsonify\n\napp = Flask(__name__)\nsearcher = Searcher('pic_index')\n\n@app.route('/', methods=['POST', 'GET'])\ndef searcher_form():\n if request.method == \"POST\":\n # pass all the argument of request.form to the page result.\n return redirect(url_for('result', **request.form, search_type='N', page=1))\n return render_template(\"index.html\")\n\n@app.route('/F-searcher', methods=[\"POST\", \"GET\"])\ndef f_searcher_form():\n if request.method == \"POST\":\n # pass all the argument of request.form to the page result.\n return redirect(url_for('result', **request.form, search_type='F', page=1))\n return render_template(\"F_searcher.html\")\n\n@app.route('/result', methods=['GET'])\ndef result():\n # retrieve the arguments and get the results corresponding to the \n # arguments from the searcher.\n\n request_dict = request.args.to_dict()\n H_checked = 'H-search' in request_dict\n p_checked = 'pic-search' in request_dict\n results = searcher.query(copy.copy(request_dict), H_checked, p_checked)\n\n total_item_num = len(results)\n end_page = total_item_num // ITEM_PER_PAGE + 1\n if total_item_num % ITEM_PER_PAGE == 0:\n end_page -= 1\n start_page = int(request_dict['page'])\n cutted_results = []\n if results:\n cutted_results = results[(start_page-1) * ITEM_PER_PAGE: \n min(len(results), start_page * ITEM_PER_PAGE)]\n\n # raise ValueError\n #render result.html.\n return render_template(\"result.html\", \n **request_dict,\n results=cutted_results, \n len_res=len(results), \n H_checked=H_checked, \n p_checked=p_checked,\n start=start_page,\n end=end_page)\n\nif __name__ == '__main__':\n app.run(debug=True, 
port=8080)", "sub_path": "lab7/src/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2032, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "searcher.Searcher", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.args.to_dict", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "searcher.query", "line_number": 33, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "402512180", "text": "# -*- coding: utf-8 -*-\nfrom typing import List\n\n\ndef staircase(size: int) -> List[str]:\n \"\"\"\n >>> staircase(4)\n [' #', ' ##', ' ###', '####']\n \"\"\"\n # ret = [f\"{'#' * i:>{size}}\" for i in range(1, size+1)]\n ret = [('#' * i).rjust(size) for i in range(1, size+1)]\n return ret\n\n\nif __name__ == '__main__':\n print(*staircase(int(input())), sep='\\n')\n", "sub_path": "algorithms/easy/warmup/staircase.py", "file_name": "staircase.py", "file_ext": "py", "file_size_in_byte": 374, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "224471492", "text": "import sys, pygame\nfrom pygame.locals import *\n\npygame.init()\nsize = width, height = 512, 768\npygame.display.set_caption(\"Asteroid Dodger\")\nscreen = pygame.display.set_mode(size)\n\nbackground_image = pygame.image.load(\"background.png\").convert()\nbackground_rect = background_image.get_rect()\n\nspaceship_image = pygame.image.load(\"spaceship.png\")\nspaceship_rect = spaceship_image.get_rect()\n\n\nwhile 1:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n \n screen.blit(background_image, background_rect)\n screen.blit(spaceship_image, spaceship_rect)\n pygame.display.flip()", "sub_path": "progress/progress/04.pyw", "file_name": "04.pyw", "file_ext": "pyw", "file_size_in_byte": 636, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pygame.init", "line_number": 4, "usage_type": 
"call"}, {"api_name": "pygame.display.set_caption", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "40183316", "text": "import astropy.table as tbl\n\nfilename='obsobj.txt'\ntt=tbl.Table.read(filename,format='ascii')\n\nind=np.where(tt['col2']=='B')\n\n#tt.sort(['col2'])\n\ntt[ind].write('bobj.txt',format='ascii',names=('#name', 'filter', 'obj', 'type', 'exp', 'etc'))\n", "sub_path": "astropytable_example.py", "file_name": "astropytable_example.py", "file_ext": "py", "file_size_in_byte": 242, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "astropy.table.Table.read", "line_number": 4, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 4, "usage_type": "attribute"}, {"api_name": "astropy.table", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "60262914", "text": "# -*-coding: UTF-8 -*-\nimport requests\n\n\nclass MbGameOps:\n def __init__(self, area_id, vmid):\n self.area_id = str(area_id)\n self.vmid = str(vmid)\n if self.area_id == '3.19':\n pass\n else:\n self.mes = self.getAreaDockerMes()\n\n def getAreaDockerMes(self):\n url = f\"http://ops.vrviu.io/api/v1/mobile/container?exact__area_type={self.area_id}&page_size=10&vm_status=true&in__vmid={self.vmid}\"\n resp = requests.get(url)\n # print(resp.text)\n assert resp.status_code == 200, 'getAreaDockerMes请求失败'\n assert len(resp.json()['data']) > 0, f'{resp.text}=>vmid搞错了?'\n return resp.json()[\"data\"][0]\n\n def getHmidIp(self):\n if self.area_id == '3.19':\n return \"10.86.3.19\"\n else:\n return self.mes[\"hmid_address\"]\n\n def getDockerName(self):\n if self.area_id == '3.19':\n return f\"3_1000_{self.vmid}_2\"\n else:\n return self.mes[\"container_name\"]\n\n def getDockerIp(self):\n if self.area_id == '3.19':\n vmid_ip = {\n '5005': '10.86.5.16',\n '5006': '10.86.5.17',\n '5007': '10.86.5.18',\n '5008': '10.86.5.19',\n '5009': '10.86.5.20',\n }\n assert self.vmid in vmid_ip.keys(), '3.19暂时未配置此vmid!'\n return vmid_ip[self.vmid]\n else:\n return self.mes[\"address\"]\n\n def getPhoneIp(self):\n if self.area_id == '3.19':\n phone_ip = {\n '5005': '10.86.56.74',\n '5006': '10.86.56.73',\n }\n if self.vmid in phone_ip.keys():\n return phone_ip[self.vmid]\n else:\n return ''\n else:\n return self.mes[\"phone_ip\"]\n\n\nclass LsConfig(MbGameOps):\n def __init__(self, area_id, vmid) -> None:\n MbGameOps.__init__(self, area_id, vmid)\n self.docker_ip = self.getDockerIp()\n\n def modifyLsConf(self, **kwargs):\n url = f\"http://{self.docker_ip}:7001/setOptions\"\n 
data = {}\n if 'debug_touch_event' in kwargs.keys():\n data['debugTouchEvnet'] = int(kwargs['debug_touch_event']) # 1 => enable reporting of client touch/click events\n if 'encode_video_duration' in kwargs.keys():\n data['saveEncodedVideoDuration'] = int(kwargs['encode_video_duration']) # enable screen recording, saved under lightStreamer/var/\n if 'sei_info' in kwargs.keys():\n data['enableSEIInfo'] = int(kwargs['sei_info'])\n print(data)\n rep = requests.post(url, json=data)\n print(rep.text)\n assert rep.status_code == 200, 'ls debug-config request failed'\n\n\nif __name__ == '__main__':\n mb = MbGameOps(\"1\", \"1\")\n mes = mb.getAreaDockerMes()\n print(mb.getHmidIp())\n", "sub_path": "utils/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 2845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "404786106", "text": "# -*- coding: utf-8 -*-\r\n# Anaconda 4.3.0 environment\r\n\r\n\"\"\"\r\n Change log\r\n [17/08/24] : newly created\r\n [17/08/28] : renamed the class and file from EnsembleLearningClassifier to EnsembleModelClassifier\r\n\r\n\"\"\"\r\n\r\nimport numpy\r\nimport pandas\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom scipy.misc import comb # for computing combinations \r\nimport math #\r\n\r\nimport operator # ?\r\n\r\n# scikit-learn library imports\r\nfrom sklearn.base import BaseEstimator # base class of estimators; defines get_params() and set_params().\r\nfrom sklearn.base import ClassifierMixin # base class of estimators; defines score().\r\nfrom sklearn.preprocessing import LabelEncoder # \r\n \r\nfrom sklearn.externals import six # ? \r\nfrom sklearn.base import clone # ?\r\nfrom sklearn.pipeline import _name_estimators # ?\r\n\r\n\r\nclass EnsembleModelClassifier( BaseEstimator, ClassifierMixin ):\r\n \"\"\"\r\n A hand-rolled ensemble-model classifier class.\r\n Inherits from BaseEstimator and ClassifierMixin, the estimator base classes of the scikit-learn library.\r\n\r\n -------------------------------------------------------------------------------------------------------------------------------\r\n Attributes with the same names as the arguments of the constructor __init()__ must be set (required by the parent class BaseEstimator) \r\n \r\n Reference: Note of sklearn.base.BaseEstimator\r\n Notes\r\n All estimators should specify all the parameters that can be set at the class level in their __init__ as explicit keyword arguments (no *args or **kwargs).\r\n http://scikit-learn.org/stable/modules/generated/sklearn.base.BaseEstimator.html#sklearn.base.BaseEstimator\r\n --------------------------------------------------------------------------------------------------------------------------------\r\n [protected] intended for protected use \r\n classifiers : list \r\n list of classifier objects : attribute with the same name as the __init()__ argument\r\n\r\n class_labels : list\r\n list of the classifiers' class-label names : attribute with the same name as the __init()__ argument\r\n vote_method : str ( \"majority_vote\" or \"probability_vote\" )\r\n how the ensemble makes its final decision : attribute with the same name as the __init()__ argument\r\n \"majority_vote\" : decided by majority vote of the weak classifiers (= argmax() over the class labels)\r\n \"probability_vote\" : decided by the weighted results of the weak classifiers (= argmax() over the class membership probabilities)\r\n\r\n weights : list \r\n list of weight values for each classifier : attribute with the same name as the __init()__ argument\r\n\r\n named_classifier :\r\n \r\n\r\n [public] public instance variables get a trailing underscore _ by convention.\r\n\r\n classifiers_ : list \r\n intended for public access to classifiers\r\n class_labels : list\r\n intended for public access to class_labels\r\n\r\n encoder_ : sklearn.preprocessing.LabelEncoder object\r\n\r\n classes_ : dict\r\n\r\n\r\n [private] prefixed with a double underscore __ (Python convention)\r\n\r\n __n_classifier 
: int\r\n number of classifiers that make up the ensemble classifier\r\n \r\n \"\"\"\r\n \r\n def __init__( self , classifiers, class_labels = [], vote_method = \"majority_vote\", weights = None ):\r\n \"\"\"\r\n Constructor (strictly speaking, the initializer).\r\n Attributes with the same names as the arguments must be set (required by the parent class BaseEstimator).\r\n \r\n [Input]\r\n classifiers_ : list\r\n list of classifier objects\r\n \r\n\r\n \"\"\"\r\n self.classifiers = classifiers\r\n \r\n self.class_labels = class_labels\r\n self.class_labels_ = class_labels\r\n\r\n self.weights = weights\r\n\r\n if classifiers != None:\r\n self.__n_classifier = len( classifiers )\r\n else:\r\n self.__n_classifier = 0\r\n\r\n self.vote_method = vote_method\r\n\r\n # ?\r\n if classifiers != None:\r\n self.named_classifiers = { key: value \r\n for key, value in _name_estimators(classifiers) }\r\n else:\r\n self.named_classifiers = {}\r\n\r\n return\r\n\r\n def print( self, str = \"\" ):\r\n \"\"\"\r\n\r\n \"\"\"\r\n print(\"\\n\")\r\n print(\"-------------------------------------------------------------------\")\r\n print( str )\r\n print( \"\\n[Attributes]\" )\r\n\r\n print( \"classifiers :\" )\r\n for clf in self.classifiers:\r\n print( \" \", clf )\r\n\r\n print( \"__n_classifier : \", self.__n_classifier )\r\n print( \"class_labels : \", self.class_labels )\r\n print( \"weights : \", self.weights )\r\n print( \"vote_method : \", self.vote_method )\r\n\r\n print( \"\\n[self]\\n\", self )\r\n print(\"-------------------------------------------------------------------\")\r\n \r\n return\r\n\r\n def fit( self, X_train, y_train ):\r\n \"\"\"\r\n Fits the classifier to the given data.\r\n A common function of classifiers and estimators in the scikit-learn library.\r\n\r\n [Input]\r\n X_train : numpy.ndarray ( shape = [n_samples, n_features] )\r\n training data (feature matrix)\r\n\r\n y_train : numpy.ndarray ( shape = [n_samples] )\r\n list of class labels (teacher data) for the training data\r\n\r\n [Output]\r\n self : this object\r\n\r\n \"\"\"\r\n # Use the LabelEncoder class so that class labels start from 0.\r\n # 
This matters for the numpy.argmax() call inside self.predict().\r\n self.encoder_ = LabelEncoder()\r\n self.encoder_.fit( y_train )\r\n self.classes = self.encoder_.classes_\r\n\r\n # initialize the list of publicly accessible classifiers\r\n self.classifiers_ = []\r\n\r\n # fit a clone clone(clf) of each classifier configured in self.classifiers, \r\n # and store it in the publicly accessible classifiers_\r\n for clf in self.classifiers:\r\n\r\n # clone() : creates an estimator with the same parameters\r\n fitted_clf = clone(clf).fit( X_train, self.encoder_.transform(y_train) )\r\n self.classifiers_.append( fitted_clf )\r\n\r\n return self\r\n\r\n\r\n def predict( self, X_features ):\r\n \"\"\"\r\n Returns predicted class labels based on the fitted classifiers.\r\n\r\n [Input]\r\n X_features : numpy.ndarray ( shape = [n_samples, n_features] )\r\n feature matrix to predict on\r\n [Output]\r\n vote_results : numpy.ndarray ( shape = [n_samples] )\r\n prediction results (class labels)\r\n \"\"\"\r\n # initialization\r\n #vote_results = []\r\n\r\n #------------------------------------------------------------------------------------------------------\r\n # Case: the ensemble's final decision method vote_method is the weak-classifier weighting scheme \"probability_vote\"\r\n #------------------------------------------------------------------------------------------------------\r\n if self.vote_method == \"probability_vote\":\r\n # numpy.argmax() : returns the index of the largest element of the given array\r\n # axis : axis along which the maximum is taken ( axis = 1 : row direction of a 2D array)\r\n vote_results = numpy.argmax( self.predict_proba(X_features), axis = 1 )\r\n\r\n #------------------------------------------------------------------------------------------------------ \r\n # Case: the ensemble's final decision method vote_method is the majority-vote scheme \"majority_vote\"\r\n #------------------------------------------------------------------------------------------------------\r\n else:\r\n # store each weak classifier clf's predict() result in predictions (list)\r\n predictions = [ clf.predict(X_features) for clf in self.classifiers_ ]\r\n #print( \"EnsembleLearningClassifier.fit() { predictions } : \\n\", predictions)\r\n\r\n # transpose predictions, flipping rows and columns (shape)\r\n # numpy.asarray() : behaves much like np.array, but when the argument is already an ndarray it is returned as-is without copying.\r\n predictions = numpy.asarray( predictions ).T\r\n #print( \"EnsembleLearningClassifier.fit() { numpy.asarray(predictions).T } : \\n\", predictions)\r\n\r\n # For each sample, take the weighted sum of class votes and return the column index where it is largest.\r\n # Implemented with the numpy.apply_along_axis() function.\r\n # numpy.apply_along_axis() : Apply a function to 1-D slices along the given axis.\r\n # Execute func1d(a, *args) where func1d operates on 1-D arrays and a is a 1-D slice of arr along axis.\r\n vote_results = numpy.apply_along_axis(\r\n lambda x : # func1d : function\r\n numpy.argmax( numpy.bincount( x, weights = self.weights ) ), # \r\n axis = 1, #\r\n arr = predictions # ndarray : Input array\r\n )\r\n\r\n # 
inverse-transform vote_results with the LabelEncoder to restore the original labels\r\n #print( \"EnsembleLearningClassifier.fit() { vote_results } : \\n\", vote_results )\r\n vote_results = self.encoder_.inverse_transform( vote_results )\r\n #print( \"EnsembleLearningClassifier.fit() { self.__encoder.inverse_transform( vote_results ) } : \\n\", vote_results )\r\n\r\n return vote_results\r\n\r\n\r\n def predict_proba( self, X_features ):\r\n \"\"\"\r\n Returns predicted class membership probabilities based on the fitted classifiers.\r\n\r\n [Input]\r\n X_features : numpy.ndarray ( shape = [n_samples, n_features] )\r\n feature matrix to predict on\r\n\r\n [Output]\r\n ave_probas : numpy.ndarray ( shape = [n_samples, n_classes] )\r\n average of the weighted class membership probabilities for each sample\r\n \"\"\"\r\n # store each weak classifier clf's predict_proba() result in predict_probas (list)\r\n #predict_probas = [ clf.predict_proba(X_features) for clf in self.classifiers_ ]\r\n #print( \"EnsembleLearningClassifier.predict_proba() { predict_probas } : \\n\", predict_probas )\r\n predict_probas = numpy.asarray( [ clf.predict_proba(X_features) for clf in self.classifiers_ ] )\r\n\r\n # averaging\r\n ave_probas = numpy.average( predict_probas, axis = 0, weights = self.weights )\r\n #print( \"EnsembleLearningClassifier.predict_proba() { ave_probas } : \\n\", ave_probas )\r\n\r\n return ave_probas\r\n\r\n\r\n def get_params( self, deep = True ):\r\n \"\"\"\r\n Overrides get_params() of the parent class BaseEstimator.\r\n \r\n \"\"\"\r\n if deep == False:\r\n # ?\r\n return super( EnsembleModelClassifier, self ).get_params( deep = False )\r\n else:\r\n # Build the parameter dictionary (structured for use with grid search).\r\n # Dictionary keys take the form \"classifier_name__parameter_name\".\r\n # The value stored under each key is the parameter value.\r\n out = self.named_classifiers.copy() # named_classifiers.copy() : \r\n\r\n # ? six.iteritems() : \r\n for name, step in six.iteritems( self.named_classifiers ):\r\n for key, value in six.iteritems( step.get_params(deep=True) ):\r\n out['%s__%s' % (name, key)] = value\r\n\r\n return out\r\n\r\n def get_classiflers( self ):\r\n \"\"\"\r\n \"\"\"\r\n return self.classifiers\r\n\r\n def get_class_labels( self ):\r\n \"\"\"\r\n \"\"\"\r\n return self.class_labels\r\n\r\n def calcEnsenbleError( self, error ):\r\n \"\"\"\r\n Computes the ensemble misclassification rate based on the binomial distribution (majority vote).\r\n P_ensemble = ∑ n_C_k * e^k * (1-e)^(n-k)\r\n\r\n [Input]\r\n error : float\r\n\r\n [Output]\r\n ensemble misclassification rate : float\r\n \"\"\"\r\n # compute the first k value of the combination n_C_k\r\n k_start = int( math.ceil( self.__n_classifier / 2.0 ) ) # math.ceil() : returns the smallest integer >= its argument\r\n\r\n # compute the n_C_k * e^k * (1-e)^(n-k) terms (list comprehension)\r\n probes = [ \r\n comb( self.__n_classifier, k ) * (error**k) * (1-error)**(self.__n_classifier - k) # comb() : combinations\r\n for k in range( k_start, self.__n_classifier + 1 ) \r\n ]\r\n\r\n # take the sum\r\n p_ensemble = sum(probes)\r\n\r\n return p_ensemble\r\n \r\n def plotEnsenbleErrorAndBaseError( self, errors = numpy.arange( 0.0 , 1.01, 0.01 ) ):\r\n \"\"\"\r\n Plots the ensemble misclassification rate based on the binomial distribution (majority vote) together with the plain (non-ensemble) misclassification rate.\r\n\r\n [Input]\r\n errors : numpy.ndarray\r\n list of misclassification rates (the x-axis)\r\n \"\"\"\r\n # compute the ensemble error rates\r\n p_ensembles = [\r\n self.calcEnsenbleError( error )\r\n for error in errors\r\n ]\r\n \r\n # plot the ensemble misclassification rate\r\n plt.plot(\r\n errors, p_ensembles, \r\n label = \"Ensemble error\", \r\n linewidth = 2\r\n )\r\n\r\n # plot the plain (non-ensemble) misclassification rate\r\n plt.plot(\r\n errors, errors,\r\n linestyle='--',\r\n label = \"Base error\", \r\n linewidth = 2\r\n )\r\n\r\n # plot the guessing lines (error = 0.5)\r\n plt.axvline( x = 0.5, linewidth = 1.0, color = 'k', linestyle = '--' )\r\n plt.axhline( y = 0.5, linewidth = 1.0, color = 'k', linestyle = '--' )\r\n 
\r\n #\r\n plt.title( \"Base/Ensemble error \\n number of classifiler = %d\" % self.__n_classifier )\r\n plt.xlabel( \"Base error\" )\r\n plt.ylabel( \"Base/Ensemble error\" )\r\n\r\n plt.legend( loc = \"best\" )\r\n #plt.grid()\r\n plt.tight_layout()\r\n\r\n return\r\n", "sub_path": "EnsembleLearning_scikit-learn/EnsembleModelClassifier.py", "file_name": "EnsembleModelClassifier.py", "file_ext": "py", "file_size_in_byte": 15718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sklearn.base.BaseEstimator", "line_number": 30, "usage_type": "name"}, {"api_name": "sklearn.base.ClassifierMixin", "line_number": 30, "usage_type": "name"}, {"api_name": "sklearn.pipeline._name_estimators", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 154, "usage_type": "call"}, {"api_name": "sklearn.base.clone", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.apply_along_axis", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 244, "usage_type": "call"}, {"api_name": "sklearn.externals.six.iteritems", "line_number": 265, "usage_type": "call"}, {"api_name": "sklearn.externals.six", "line_number": 265, "usage_type": "name"}, {"api_name": "sklearn.externals.six.iteritems", "line_number": 266, "usage_type": "call"}, {"api_name": "sklearn.externals.six", "line_number": 266, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 293, "usage_type": "call"}, {"api_name": "scipy.misc.comb", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 328, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 328, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 336, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 337, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 337, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 340, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 341, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 341, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 346, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 346, "usage_type": "name"}]} +{"seq_id": "353143757", "text": "#Import 
 the libraries.\nimport numpy as np\nimport cv2\n#open the web camera; the default index is 0, use 1 for an external web camera.\ncamera=cv2.VideoCapture(0)\n#getting the face cascade object.\nface_cas=cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\n#store the captured face values in the faces_data list\nfaces_data=[]\n#current frame number\nix=0\nwhile True:\n #camera.read() returns a success flag and the frame value (numpy matrix)\n ret,frame=camera.read()\n if ret==True:\n gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n faces=face_cas.detectMultiScale(gray,1.3,5)\n for (x,y,w,h) in faces:\n face_component=frame[y:y+h,x:x+w,:]\n fc=cv2.resize(face_component,(50,50))\n if ix%10==0 and len(faces_data)<20:\n faces_data.append(fc)\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)\n ix+=1\n cv2.imshow('frame',frame)\n if cv2.waitKey(1)==27 or len(faces_data)>=20:\n break\n \n else:\n print(\"Error\")\n\ncv2.destroyAllWindows()\nfaces_data=np.asarray(faces_data)\nprint(faces_data.shape)\nprint(faces_data)\n\n", "sub_path": "user_photo_grab/grap_45_faces.py", "file_name": "grap_45_faces.py", "file_ext": "py", "file_size_in_byte": 1123, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "cv2.VideoCapture", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "536726434", "text": "\"\"\"--------------------------------------------------------------------------------------------------------------------------------------\nMODULE\n LoanRepaymentNoticeXMLHooks\n\nDESCRIPTION\n This module contains the xml hooks used to generate the values in the xml template.\n\n-----------------------------------------------------------------------------------------------------------------------------------------\nHISTORY\n=========================================================================================================================================\nDate Change no Developer Requester Description\n-----------------------------------------------------------------------------------------------------------------------------------------\n2018-11-20 Stuart Wilson Loan Ops XML Hooks for confo generation\n-----------------------------------------------------------------------------------------------------------------------------------------\n\"\"\"\n\nfrom datetime import datetime\n\nimport acm\n\nfrom EmailBodyHTMLGenerator import EmailBodyHTMLGenerator, GenerateEmailBodyHTMLRequest\nfrom LoanRepaymentNoticeXMLGenerator import GenerateRepaymentNoticeXMLRequest, RepaymentNoticeXMLGenerator\n\n\nemail_body_html_generator = EmailBodyHTMLGenerator()\ngenerator = RepaymentNoticeXMLGenerator()\n\n\ndef value_date(confirmation):\n date=acm.Time.DateAddDelta(acm.Time.DateFromTime(confirmation.CreateTime()), 0, 0, 7)\n return datetime(*acm.Time.DateToYMD(date)).strftime('%d %B %Y')\n\ndef 
get_email_body(confirmation):\n \"\"\"\n This function creates an HTML email body\n \"\"\"\n document_description = 'your {document_type} for {period_description}'\n document_description = document_description.format(\n document_type='Repayment Notice',\n period_description=value_date(confirmation)\n )\n request = GenerateEmailBodyHTMLRequest(\n confirmation.AcquirerContactRef().Attention(),\n confirmation.AcquirerContactRef().Telephone(),\n get_email_from(confirmation),\n document_description\n )\n return email_body_html_generator.generate_html(request)\n\n\ndef get_email_subject(confirmation):\n \"\"\"\n Function that creates the email subject\n \"\"\"\n conf_date = value_date(confirmation)\n counterparty = str(confirmation.Receiver().Id())\n event_name = confirmation.EventChlItem().Name()\n seq = (event_name + \":\", counterparty, conf_date)\n return \"\".join(seq)\n\n\ndef get_email_bcc(confirmation):\n \"\"\"\n Get any email address to be BCC'ed when delivering a repayment\n notice.\n \"\"\"\n prod_env = acm.FInstallationData.Select('').At(0).Name() == 'Production'\n if prod_env:\n contact = confirmation.AcquirerContactRef()\n return contact.Email()\n return None\n\n\ndef get_email_file_name(confirmation):\n \"\"\"\n This function returns a filename built from event name + counterparty + confirmation date\n \"\"\"\n\n conf_date = value_date(confirmation)\n counterparty = str(confirmation.Receiver().Id())\n event_name = confirmation.EventChlItem().Name()\n seq = (event_name, counterparty, conf_date)\n return \"_\".join(seq)\n\n\ndef get_email_to(confirmation):\n \"\"\"\n Get the To email address to use for delivery of a repayment\n notice.\n \"\"\"\n contact = confirmation.CounterpartyContactRef()\n return contact.Email()\n\n\ndef get_email_from(confirmation):\n \"\"\"\n Get the From email address to use for delivery of a repayment\n notice.\n \"\"\"\n contact = confirmation.AcquirerContactRef()\n # Ensure that only one from email is specified or the email\n # may be rejected by the mail server.\n return contact.Email().split(',')[0]\n\n\ndef repayment_notice_xml(confirmation):\n \"\"\"\n Function that returns the specific XML generated for the repayment notice document\n \"\"\"\n xml_request = GenerateRepaymentNoticeXMLRequest(\n confirmation.Acquirer(),\n confirmation.AcquirerContactRef(),\n confirmation.Counterparty(),\n confirmation.CounterpartyContactRef(),\n confirmation\n )\n return generator.generate_xml(xml_request)\n", "sub_path": "Extensions/ABSA Documentation/FPythonCode/LoanRepaymentNoticeXMLHooks.py", "file_name": "LoanRepaymentNoticeXMLHooks.py", "file_ext": "py", "file_size_in_byte": 4162, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "EmailBodyHTMLGenerator.EmailBodyHTMLGenerator", "line_number": 25, "usage_type": "call"}, {"api_name": "LoanRepaymentNoticeXMLGenerator.RepaymentNoticeXMLGenerator", "line_number": 26, "usage_type": "call"}, {"api_name": "acm.Time.DateAddDelta", "line_number": 30, "usage_type": "call"}, {"api_name": "acm.Time", "line_number": 30, "usage_type": "attribute"}, {"api_name": "acm.Time.DateFromTime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "call"}, {"api_name": "acm.Time.DateToYMD", "line_number": 31, "usage_type": "call"}, {"api_name": "acm.Time", "line_number": 31, "usage_type": "attribute"}, {"api_name": "EmailBodyHTMLGenerator.GenerateEmailBodyHTMLRequest", "line_number": 42, "usage_type": "call"}, 
{"api_name": "acm.FInstallationData.Select", "line_number": 67, "usage_type": "call"}, {"api_name": "acm.FInstallationData", "line_number": 67, "usage_type": "attribute"}, {"api_name": "LoanRepaymentNoticeXMLGenerator.GenerateRepaymentNoticeXMLRequest", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "225405608", "text": "# encoding: utf-8\n\"\"\"\n@author: Yuxian Meng\n@contact: yuxian_meng@shannonai.com\n\n@version: 1.0\n@file: load_data\n@time: 2019/11/6 14:49\n\"\"\"\n\nimport os\nfrom torch.utils.data import Dataset\nfrom fairseq.data.indexed_dataset import MMapIndexedDataset\n\n\nclass SequenceLabelingDataset(Dataset):\n \"\"\"Sequence Labeling Dataset\"\"\"\n def __init__(self, directory, prefix, fields=None):\n super().__init__()\n fields = fields or [\"inputs\", \"labels\", \"label_mask\", \"attention_mask\", \"segment_ids\"]\n self.fields2datasets = {}\n self.fields = fields\n for field in fields:\n self.fields2datasets[field] = MMapIndexedDataset(os.path.join(directory, f\"{prefix}.{field}\"))\n self.lengths = []\n self.get_lengths()\n\n def __len__(self):\n return len(self.fields2datasets[self.fields[0]])\n\n def __getitem__(self, item):\n return [self.fields2datasets[field][item] for field in self.fields]\n\n def get_lengths(self):\n \"\"\"\n for group batch sampler\n \"\"\"\n for label_mask in self.fields2datasets['label_mask']:\n self.lengths.append(label_mask.sum().item())\n\n\nclass SequenceMaskingDataset(Dataset):\n \"\"\"Sequence Labeling Dataset\"\"\"\n def __init__(self, directory, prefix, fields=None):\n super().__init__()\n fields = fields or [\"inputs\", \"attention_mask\", \"lm_label_ids\"]\n self.fields2datasets = {}\n self.fields = fields\n for field in fields:\n self.fields2datasets[field] = MMapIndexedDataset(os.path.join(directory, f\"{prefix}.{field}\"))\n self.lengths = []\n self.get_lengths()\n\n def __len__(self):\n return len(self.fields2datasets[self.fields[0]])\n\n def __getitem__(self, item):\n return [self.fields2datasets[field][item] for field in self.fields]\n\n def get_lengths(self):\n \"\"\"\n for group batch sampler\n \"\"\"\n for label_mask in self.fields2datasets['attention_mask']:\n self.lengths.append(label_mask.sum().item())\n\n\ndef run():\n path = \"/data/nfsdata2/nlp_application/datasets/grammar-correction/chinese/chinese_ner/v2_20191119/bin\"\n prefix = \"train\"\n fields = [\"inputs\", \"labels\", \"label_mask\", \"segment_ids\"]\n fields = None\n dataset = SequenceLabelingDataset(path, prefix=prefix, fields=fields)\n print(len(dataset))\n for d in dataset:\n print([v.shape for v in d])\n\n\nif __name__ == '__main__':\n run()\n", "sub_path": "bert_ner/dataset_readers/load_data.py", "file_name": "load_data.py", "file_ext": "py", "file_size_in_byte": 2449, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 16, "usage_type": "name"}, {"api_name": "fairseq.data.indexed_dataset.MMapIndexedDataset", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 42, "usage_type": "name"}, {"api_name": "fairseq.data.indexed_dataset.MMapIndexedDataset", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}]} +{"seq_id": 
"447370649", "text": "\"\"\"People Counter.\"\"\"\n\"\"\"\n Copyright (c) 2018 Intel Corporation.\n Permission is hereby granted, free of charge, to any person obtaining\n a copy of this software and associated documentation files (the\n \"Software\"), to deal in the Software without restriction, including\n without limitation the rights to use, copy, modify, merge, publish,\n distribute, sublicense, and/or sell copies of the Software, and to\n permit person to whom the Software is furnished to do so, subject to\n the following conditions:\n The above copyright notice and this permission notice shall be\n included in all copies or substantial portions of the Software.\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\nimport os\nimport sys\nimport time\nimport socket\nimport json\nimport cv2\n\nimport logging as log\nimport paho.mqtt.client as mqtt\n\nfrom argparse import ArgumentParser\nfrom inference import Network\n\n# MQTT server environment variables\nHOSTNAME = socket.gethostname()\nIPADDRESS = socket.gethostbyname(HOSTNAME)\nMQTT_HOST = IPADDRESS\nMQTT_PORT = 3001\nMQTT_KEEPALIVE_INTERVAL = 60\n\nMODEL_NAME = \"ssd_mobilenet_v1_coco\"\n\nFRAME_WIDTH = 768\nFRAME_HEIGHT = 432\n\n#Taken from https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/\n#implements non-max suppression\n# Malisiewicz et al.\ndef non_max_suppression_fast(boxes, overlapThresh = 0.4):\n import numpy as np\n # if there are no boxes, return an empty list\n if len(boxes) == 0:\n return []\n # if the bounding boxes integers, convert them to floats --\n # this is important since we'll be doing a bunch of divisions\n if boxes.dtype.kind == \"i\":\n boxes = boxes.astype(\"float\")\n # initialize the list of picked indexes\t\n pick = []\n # grab the coordinates of the bounding boxes\n x1 = boxes[:,3]\n y1 = boxes[:,4]\n x2 = boxes[:,5]\n y2 = boxes[:,6]\n # compute the area of the bounding boxes and sort the bounding\n # boxes by the bottom-right y-coordinate of the bounding box\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idxs = np.argsort(y2)\n \n # keep looping while some indexes still remain in the indexes list\n \n while len(idxs) > 0:\n \n # grab the last index in the indexes list and add the\n # index value to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n \n # find the largest (x, y) coordinates for the start of\n # the bounding box and the smallest (x, y) coordinates\n # for the end of the bounding box\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n \n # compute the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n # compute the ratio of overlap\n overlap = (w * h) / area[idxs[:last]]\n \n # delete all indexes from the index list that have\n idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))\n \n # return only the bounding boxes that were picked using the\n # integer data type\n return 
boxes[pick].astype(\"int\")\n\n\n\ndef build_argparser():\n \"\"\"\n Parse command line arguments.\n\n :return: command line arguments\n \"\"\"\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser\n\n\ndef connect_mqtt():\n ### TODO: Connect to the MQTT client ###\n client = mqtt.Client()\n client.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)\n\n return client\n\n\ndef infer_on_stream(args, client):\n \"\"\"\n Initialize the inference network, stream video to network,\n and output stats and video.\n\n :param args: Command line arguments parsed by `build_argparser()`\n :param client: MQTT client\n :return: None\n \"\"\"\n import numpy as np\n # Initialise the class\n infer_network = Network()\n # Set Probability threshold for detections\n prob_threshold = args.prob_threshold\n\n ### TODO: Load the model through `infer_network` ###\n \n network = infer_network.load_model(model= args.model, device = args.device, cpu_extension = args.cpu_extension)\n ### TODO: Handle the input stream ###\n if sys.platform == \"linux\" or sys.platform == \"linux2\":\n CODEC = 0x00000021\n elif sys.platform == \"darwin\":\n CODEC = cv2.VideoWriter_fourcc('M','J','P','G')\n else:\n print(\"Unsupported OS.\")\n exit(1)\n \n image_flag = False\n if args.input == 'CAM':\n args.i = 0\n elif args.input.split(\".\")[-1] == 'jpg' or args.input.split(\".\")[-1] == 'bmp' or args.input.split(\".\")[-1] == 'png':\n image_flag = True\n \n cap = cv2.VideoCapture(args.input)\n cap.open(args.input)\n \n if not image_flag:\n out = cv2.VideoWriter('output.mp4', CODEC, 30, (FRAME_WIDTH, FRAME_HEIGHT))\n \n net_input_shape = infer_network.get_input_shape()\n \n person_count = 0 #for counting the number of persons\n PERSON_DURATION = 0 #for the duration of time the person spends in the frames\n person_tracker = np.array([]) #for trackin the person 0->person_count, 1-> PERSON_DURATION, 2-> PERSON_TRACKING , 3-> MISSING FPS, 4-> had started tracking from \n person_centroids = np.array([]) # keeps all the centroids of the people tracked.\n frames_elapsed = 0\n no_longer_tracking = [] #Keeping a list of all those people no longer being tracked\n mean = 0\n p_mean = 0\n \n \n while cap.isOpened():\n ### TODO: Loop until stream is over ###\n \n ### TODO: Read from the video capture ###\n flag, frame = cap.read()\n \n if not flag:\n break\n key_pressed = cv2.waitKey(60)\n \n #Getting Frames elapsed and FPS\n frames_elapsed += 1\n fps = cap.get(cv2.CAP_PROP_FPS) #Please note that this might not work with Webcam\n\n ### TODO: Pre-process the image as needed ###\n img = cv2.resize( frame, (net_input_shape[3], net_input_shape[2]))\n img = img.transpose((2,0,1))\n img = img.reshape(1, *img.shape)\n 
\n ### TODO: Start asynchronous inference for specified request ###\n infer_network.exec_net(img)\n ### TODO: Wait for the result ###\n if infer_network.wait() == 0:\n ### TODO: Get the results of the inference request ###\n output = infer_network.get_output()\n \n filter_output = [] #filter only the \"Person\" (class = 1)\n for each in output[0][0]:\n# print(\"NETWORK OUTPUT\", each)\n if( each[1] == 1):\n if( prob_threshold != None):\n #if probabilty threshold has been given as argument\n if( each[2] >= prob_threshold ):\n# filter_output.append([each[0], each[1], each[2] * 100, int(each[3] * net_input_shape[3]), int(each[4] * net_input_shape[2]), int(each[5] * net_input_shape[3]), int(each[6] * net_input_shape[2])])\n filter_output.append([each[0], each[1], each[2] * 100, int(each[3] * FRAME_WIDTH), int(each[4] * FRAME_HEIGHT), int(each[5] * FRAME_WIDTH), int(each[6] * FRAME_HEIGHT)])\n else:\n if( each[2] >= 0.6):\n #Default prob threshold is 0.6\n# filter_output.append([each[0], each[1], each[2] * 100 , int(each[3] * net_input_shape[3]), int(each[4] * net_input_shape[2]), int(each[5] * net_input_shape[3]), int(each[6] * net_input_shape[2])])\n filter_output.append([each[0], each[1], each[2] * 100 , int(each[3] * FRAME_WIDTH), int(each[4] * FRAME_HEIGHT), int(each[5] * FRAME_WIDTH), int(each[6] * FRAME_HEIGHT)])\n \n #filter output now contains the original sized image boundaries\n filter_output = np.array(filter_output, dtype = 'int')\n filter_output = non_max_suppression_fast(filter_output, 0.3)\n o_frame = cv2.resize(frame, (FRAME_WIDTH, FRAME_HEIGHT))\n tracked = [] #To get the IDs of the people tracker in this frame.\n CURRENT_COUNT = len(filter_output)\n\n\n #LOGIC:- \n # 1. For every detection, calculate the centroid of the detection\n # 2. If the centroid is close to one of the tracking centroids, then the person must be still in the frame.\n # 3. Update the matched centroid.\n # 4. If no matching centroid is found for a detection, then it is a new person. The person is added to the person_tracking array.\n # 5. For every person who is being tracked but is not detected in the frame, the specific criteria in line 309\n # 6. 
A person who is not being tracked has his/her stats removed from person_tracking and person_centroids and put into no_longer_tracking\n\n\n\n for each in filter_output:\n PERSON_CENTROID = ((each[3] + each[5])/2, (each[4] + each[6])/2 )\n person_found_flag = False\n \n if len(person_centroids) > 0 : \n x, y = person_centroids[np.where(person_tracker[:,2] == 1), 0].flatten(), person_centroids[ np.where(person_tracker[:,2] == 1), 1].flatten()\n distances = ( (x - PERSON_CENTROID[0])**2 + (y - PERSON_CENTROID[1])**2 )**0.5\n \n if len(distances) > 0:\n \n idx = np.argsort(distances)[0]\n \n if distances[idx] <= 85 :\n #MIN_DISTANCE Criteria satisfied!\n #if we are still tracking the person\n o_frame = cv2.rectangle(o_frame, (each[3], each[4]), (each[5], each[6]), (255, 0, 0), 2)\n o_frame = cv2.circle(o_frame, (int(PERSON_CENTROID[0]), int(PERSON_CENTROID[1])), 2, (0,0,255), 2) #Putting the centroid\n person_tracker[idx][1] = time.time() - person_tracker[idx][4] #Updating the Duration of Tracking\n\n text_1 = \"PERSON_ID-\"+ str(int(person_tracker[idx][0])+1)\n text_2 = \"LOC-\" + str(person_centroids[idx])\n text_3 = \"DUR-\" + str(person_tracker[idx][1])\n\n cv2.putText(o_frame, text_1 , (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)\n cv2.putText(o_frame, text_2 , (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)\n cv2.putText(o_frame, text_3 , (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)\n\n person_centroids[idx] = PERSON_CENTROID #updating the person's centroid\n person_found_flag = True #no new person was encountered\n tracked.append(person_tracker[idx][0]) #adding idx to the tracked list\n \n if person_found_flag == False :\n #person was not found, so create a new tracking entry \n \n person = [person_count, 0, 1, 0, time.time()]\n \n if len(person_centroids) == 0:\n person_tracker = np.append(person_tracker, person)\n person_centroids = np.append(person_centroids, PERSON_CENTROID)\n person_tracker = np.expand_dims(person_tracker, axis=0)\n person_centroids = np.expand_dims(person_centroids, axis=0)\n else:\n person_tracker = np.vstack([person_tracker, person])\n person_centroids = np.vstack([person_centroids, PERSON_CENTROID])\n \n o_frame = cv2.rectangle(o_frame, (each[3], each[4]), (each[5], each[6]), (255, 0, 0), 2)\n o_frame = cv2.circle(o_frame, (int(PERSON_CENTROID[0]), int(PERSON_CENTROID[1])), 2, (0,0,255), 2) #putting the centroid\n text_1 = \"PERSON_ID-\"+ str(person_count+1) \n text_2 = \"LOC-\" + str(PERSON_CENTROID)\n text_3 = \"DUR-\" + str(person[1])\n \n cv2.putText(o_frame, text_1 , (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)\n cv2.putText(o_frame, text_2 , (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)\n cv2.putText(o_frame, text_3 , (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)\n \n tracked.append(person_count)\n person_count += 1\n person_found_flag = True\n \n i=0\n while(i < len(person_tracker)):\n if person_tracker[i][0] not in tracked:\n person_tracker[i][3] += 1 #one more frame in which this person was missed\n if (person_tracker[i][3] >
= 150 and ( abs(person_centroids[i][0] - FRAME_WIDTH) <= 150 or abs(person_centroids[i][1] - FRAME_HEIGHT) <= 50)) or ( abs(person_centroids[i][0] - FRAME_WIDTH) <= 150 or abs(person_centroids[i][1] - FRAME_HEIGHT) <= 50) or person_tracker[i][3] >= 250:\n #if the person has been missing for too many frames or has left via the frame edge, the person is gone, so removing him/her from the list\n if person_tracker[i][2]:\n person_tracker[i][2] = 0\n person_stats = person_tracker[i]\n \n person_centroids = list(person_centroids)\n person_tracker = list(person_tracker)\n \n person_centroid = person_centroids.pop(i)\n 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "socket.gethostname", "line_number": 37, "usage_type": "call"}, {"api_name": "socket.gethostbyname", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 97, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 111, "usage_type": "call"}, {"api_name": "paho.mqtt.client.Client", "line_number": 134, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 134, "usage_type": "name"}, {"api_name": "inference.Network", "line_number": 151, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 159, "usage_type": "attribute"}, {"api_name": "sys.platform", "line_number": 161, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 162, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 173, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 199, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 203, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 233, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 260, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 265, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 266, "usage_type": "call"}, {"api_name": "time.time", "line_number": 267, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 273, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 273, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 273, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 274, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 274, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 274, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 275, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 275, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 275, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 289, "usage_type": "call"}, 
{"api_name": "numpy.expand_dims", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 293, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 295, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 296, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 301, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 301, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 301, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 302, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 302, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 302, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 303, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 303, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 303, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 331, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 343, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 344, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 344, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 344, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 348, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 350, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 350, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 350, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 354, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 354, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 354, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 356, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 356, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 356, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 358, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 358, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 358, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 360, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 360, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 360, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 362, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 362, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 362, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 365, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 377, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 381, "usage_type": "call"}, {"api_name": "sys.stdout.buffer.write", "line_number": 384, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 384, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 385, "usage_type": "call"}, {"api_name": "sys.stdout", 
"line_number": 385, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 391, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 400, "usage_type": "call"}]} +{"seq_id": "587982863", "text": "import numpy\nimport pca\nimport pickle\n\n# 存储降维后的矩阵\nxformedItems = None\n\ndef ecludSim (inA, inB):\n\treturn 1.0 / (1.0 + numpy.linalg.norm (inA - inB))\n\ndef pearsSim (inA, inB):\n\tif len (inA) < 3:\n\t\treturn 1.0\n\treturn 0.5 + 0.5 * numpy.corrcoef (inA, inB, rowvar = 0)[0][1]\n\ndef cosSim (inA, inB):\n\tnum = float (inA.T * inB)\n\tdenom = numpy.linalg.norm (inA) * numpy.linalg.norm (inB)\n\tif num == 0:\n\t\treturn 0\n\treturn 0.5 + 0.5 * num / denom\n\t\ndef standEst (dataMat, project, simMeas, item):\n\tn = numpy.shape (dataMat)[1]\n\tsimTotal = 0.0 ; ratSimTotal = 0.0\n\t\n\t# 遍历物品,对item进行评价估计\n\tfor j in range (n):\n\t\tprojectRating = project[j] # 当前用户对物品j的评价\n\t\tif projectRating == 0:\n\t\t\tcontinue\n\n\t\tsimilarity = simMeas (dataMat[:, item], dataMat[:, j]) # 计算相似度\n\t\tsimTotal += 1 # 相似度累计\n\t\tratSimTotal += similarity * projectRating # 对item进行评价估计\n\t\n\t# 估算对item的评价\n\tif simTotal == 0:\n\t\treturn 0\n\telse:\n\t\treturn ratSimTotal / simTotal\n\t\t\ndef svdEst (dataMat, project, simMeas, item):\n\tn = numpy.shape (dataMat)[1]\n\tsimTotal = 0.0 ; ratSimTotal = 0.0\n\t\n\t# PCA降维\n\tglobal xformedItems\n\tif (xformedItems == None):\n\t\txformedItems = pca.svdPCA (dataMat.T)\n#\t\ttry:\n#\t\t\twith open ('pca.pkl', 'rb') as fr:\n#\t\t\t\txformedItems = pickle.load (fr)\n#\t\texcept FileNotFoundError: \n#\t\t\txformedItems = pca.svdPCA (dataMat.T)\n#\t\t\twith open ('pca.pkl', 'wb') as fr:\n#\t\t\t\tpickle.dump (xformedItems, fr)\n\t\t\n\t# 遍历物品,对item进行评价估计\n\tfor j in range (n):\n\t\tprojectRating = project[j] # 当前项目对图片j的使用情况\n\t\tif projectRating == 0:\n\t\t\tcontinue\n\t\tsimilarity = simMeas (xformedItems[item, :].T, xformedItems[j, :].T) # 在低维空间计算相似度\n\t\tsimTotal += 1 # 相似度累计\n\t\tratSimTotal += similarity * projectRating # 对item进行风格相似度计算\n\t\n\t# 估算item的风格相似度\n\tif simTotal == 0:\n\t\treturn 0\n\telse:\n\t\treturn ratSimTotal / simTotal\n\ndef recommend (dataMat, project, per = 0.7, simMeas = cosSim, estMethod = svdEst):\n\tunratedItems = numpy.nonzero (project == 0)[0]\n\tif len (unratedItems) == 0:\n\t\treturn []\n\n\t# 统计所有未使用的图片\n\titemScores = []\n\tfor item in unratedItems:\n\t\testimatedScore = estMethod (dataMat, project, simMeas, item) # 获取评价的估计值\n\t\titemScores.append ((item, estimatedScore))\n\t\n\t# 返回风格相似度per以上的图片\n\titemScores = filter (lambda jj: jj[1] >= per, itemScores)\n\treturn sorted (itemScores, key = lambda jj: jj[1], reverse = True)\n\t\ndef MAE (dataMat, project, simMeas = cosSim, estMethod = svdEst):\n\tratedItems = numpy.nonzero (project == 1)[0]\n\n\t# 统计所有使用过的图片\n\tsumSores = 0\n\tfor item in ratedItems:\n\t\tproject[item] = 0\n\t\testimatedScore = estMethod (dataMat, project, simMeas, item) # 获取评价的估计值\n\t\tproject[item] = 1\n\t\tsumSores += 1 - estimatedScore\n\t\n\treturn sumSores / len (ratedItems)", "sub_path": "recommend.py", "file_name": "recommend.py", "file_ext": "py", "file_size_in_byte": 2909, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.linalg.norm", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.corrcoef", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 18, "usage_type": 
"call"}, {"api_name": "numpy.linalg", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 44, "usage_type": "call"}, {"api_name": "pca.svdPCA", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "127720988", "text": "# -*- coding: utf-8 -*-\nimport openerp.http as http\nimport odoo\nfrom odoo.http import request\nimport base64\nimport werkzeug\n\ndef binary_content(xmlid=None, model='ir.attachment', id=None, field='datas', unique=False, filename=None, filename_field='datas_fname', download=False, mimetype=None, default_mimetype='application/octet-stream', env=None):\n return request.registry['ir.http'].binary_content(\n xmlid=xmlid, model=model, id=id, field=field, unique=unique, filename=filename, filename_field=filename_field,\n download=download, mimetype=mimetype, default_mimetype=default_mimetype, env=env)\n\nclass ImageResizeHackController(http.Controller):\n\n def force_contenttype(self, headers, contenttype='image/png'):\n dictheaders = dict(headers)\n dictheaders['Content-Type'] = contenttype\n return dictheaders.items()\n \n @http.route('/web/image2//x/', type=\"http\", auth=\"public\")\n def content_image(self, xmlid=None, model='ir.attachment', id=None, field='datas', filename_field='datas_fname', unique=None, filename=None, mimetype=None, download=None, width=0, height=0):\n status, headers, content = binary_content(xmlid=xmlid, model=model, id=id, field=field, unique=unique, filename=filename, filename_field=filename_field, download=download, mimetype=mimetype, default_mimetype='image/png')\n if status == 304:\n return werkzeug.wrappers.Response(status=304, headers=headers)\n elif status == 301:\n return werkzeug.utils.redirect(content, code=301)\n elif status != 200 and download:\n return request.not_found()\n\n if content and (width or height):\n content = odoo.tools.image_resize_image(base64_source=content, size=(width or None, height or None), encoding='base64', filetype='PNG')\n # resize force png as filetype\n headers = self.force_contenttype(headers, contenttype='image/png')\n\n if content:\n image_base64 = base64.b64decode(content)\n else:\n image_base64 = self.placeholder(image='placeholder.png') # could return (contenttype, content) in master\n headers = self.force_contenttype(headers, contenttype='image/png')\n\n headers.append(('Content-Length', len(image_base64)))\n response = request.make_response(image_base64, headers)\n response.status_code = status\n return response\n ", "sub_path": "migration_wordpress/controllers/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "odoo.http.request.registry", "line_number": 9, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 9, "usage_type": "name"}, {"api_name": "openerp.http.Controller", "line_number": 13, "usage_type": "attribute"}, {"api_name": "openerp.http", "line_number": 13, "usage_type": "name"}, {"api_name": "werkzeug.wrappers.Response", "line_number": 24, "usage_type": "call"}, {"api_name": "werkzeug.wrappers", "line_number": 24, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.redirect", "line_number": 26, "usage_type": "call"}, {"api_name": "werkzeug.utils", "line_number": 26, 
"usage_type": "attribute"}, {"api_name": "odoo.http.request.not_found", "line_number": 28, "usage_type": "call"}, {"api_name": "odoo.http.request", "line_number": 28, "usage_type": "name"}, {"api_name": "odoo.tools.image_resize_image", "line_number": 31, "usage_type": "call"}, {"api_name": "odoo.tools", "line_number": 31, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 36, "usage_type": "call"}, {"api_name": "odoo.http.request.make_response", "line_number": 42, "usage_type": "call"}, {"api_name": "odoo.http.request", "line_number": 42, "usage_type": "name"}, {"api_name": "openerp.http.route", "line_number": 20, "usage_type": "call"}, {"api_name": "openerp.http", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "295817562", "text": "'''\nCreated on 22/04/2014\n\n@author: submarino\n'''\nfrom django.http import HttpResponse\nimport demjson as djson\nfrom django.utils import simplejson\nfrom decimal import Decimal\nfrom datetime import datetime, date, time\nimport json\n\nclass ExtendedJSONEncoder(simplejson.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, Decimal):\n exponent = obj.as_tuple()[2]\n if exponent < 0:\n return \"%%.%df\" % -exponent % obj\n else:\n return \"%d.0\" % obj\n if isinstance(obj, datetime) or isinstance(obj, date) or isinstance(obj, time):\n return obj.isoformat()\n return super(ExtendedJSONEncoder, self).default(obj)\n\ndef json_view(func):\n def wrap(req, *args, **kwargs):\n\n params = {}\n try:\n params = djson.decode(req.body)\n except:\n pass\n resp = func(params, *args, **kwargs)\n\n if isinstance(resp, HttpResponse):\n return resp\n \n try:\n resp, status = resp[0],resp[1]\n except:\n resp, status = resp, None\n\n return HttpResponse(json.dumps(resp, cls=ExtendedJSONEncoder), status=status)\n\n return wrap\n", "sub_path": "ws-root/utils/json_utils.py", "file_name": "json_utils.py", "file_ext": "py", "file_size_in_byte": 1209, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.utils.simplejson.JSONEncoder", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.utils.simplejson", "line_number": 13, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 15, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "argument"}, {"api_name": "datetime.date", "line_number": 21, "usage_type": "argument"}, {"api_name": "datetime.time", "line_number": 21, "usage_type": "argument"}, {"api_name": "demjson.decode", "line_number": 30, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 35, "usage_type": "argument"}, {"api_name": "django.http.HttpResponse", "line_number": 43, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "176303719", "text": "import sys\nimport time\nimport sqlite3\nimport re\nimport traceback\nimport telepot\nfrom pprint import pprint\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nfrom urllib import parse\n\nclass Telegram:\n def __init__(self):\n self.bot = telepot.Bot('1894072938:AAE54zG8gITUFBVF7-PnwKXp1vrugaNtG68')\n self.bot.getMe()\n self.id = 1874023304\n self.key = \"YrQn72lYE4qA3NfS2pkl%2FEwy95kCZ8jghF27PMOoOD3apbMi6htMwfFztU28urc6rMLLh8eWyVdDGVLCooMWPw%3D%3D\"\n #self.key_2 =\"YrQn72lYE4qA3NfS2pkl%2FEwy95kCZ8jghF27PMOoOD3apbMi6htMwfFztU28urc6rMLLh8eWyVdDGVLCooMWPw%3D%3D\"\n self.bot.sendMessage(self.id, '내정신좀보소 bot 입니다.')\n 
self.bot.message_loop(self.handle)\n print('Listening...')\n while 1:\n time.sleep(10)\n\n def replyData(self,data_param,area,object):\n if data_param ==0:\n url = 'http://apis.data.go.kr/1320000/LosfundInfoInqireService/getLosfundInfoAccToLc?serviceKey=' + self.key + \\\n '&PRDT_NM='+str(parse.quote(object)) +'&ADDR='+str(parse.quote(area)) +'&pageNo=1&numOfRows=10'\n elif data_param ==1:\n url = 'http://apis.data.go.kr/1320000/LostGoodsInfoInqireService/getLostGoodsInfoAccTpNmCstdyPlace?serviceKey=' + self.key + \\\n '&LST_PLACE='+str(parse.quote(area)) +'&LST_PRDT_NM='+str(parse.quote(object)) +'&pageNo=1&numOfRows=10'\n\n response = urlopen(url).read()\n\n soup = BeautifulSoup(response, 'html.parser')\n items = soup.findAll('item')\n res_list = []\n for item in items:\n sItem = str(item)\n item = re.sub('<.*?>', '|', sItem)\n parsed = item.split('|')\n print(parsed)\n try:\n if data_param ==0:\n row = parsed[2] + '\\n' + parsed[6] + '\\n' + parsed[10] + '\\n' + parsed[14] + '\\n '\n elif data_param ==1:\n row = parsed[4] + '\\n' + parsed[8] + '\\n' + parsed[10] + '\\n'\n\n except IndexError:\n row = item.replace('|', ',')\n if row:\n res_list.append(row.strip())\n msg = ' '\n for r in res_list:\n if len(r + msg) + 1 > 300:\n msg += r + '\\n'\n else:\n msg += r + '\\n'\n if msg:\n self.bot.sendMessage(self.id, msg)\n else:\n self.bot.sendMessage(self.id, '데이터가 없습니다')\n\n def handle(self,msg):\n content_type, chat_type, chat_id = telepot.glance(msg)\n if content_type != 'text':\n self.bot.sendMessage(chat_id, '난 텍스트 이외의 메시지는 처리하지 못해요.')\n return\n\n text = msg['text']\n args = text.split(' ')\n if text.startswith('분실') and len(args)>1:\n print('try to 분실',args[1])\n self.replyData(1,args[1],args[2])\n elif text.startswith('습득') and len(args)>1:\n print('try to 습득', args[1])\n self.replyData(0,args[1],args[2])\n else:\n self.bot.sendMessage(chat_id, '모르는 명령어입니다.\\n분실 또는 습득 지역 물건종류 순서로 입력하세요.')\n\n\nTelegram()", "sub_path": "teller.py", "file_name": "teller.py", "file_ext": "py", "file_size_in_byte": 3224, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "telepot.Bot", "line_number": 14, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 28, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 28, "usage_type": "name"}, {"api_name": "urllib.parse.quote", "line_number": 31, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 31, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 33, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 40, "usage_type": "call"}, {"api_name": "telepot.glance", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "229287066", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport ssl\nimport json\nimport csv\nfrom urllib2 import Request, urlopen\nfrom urllib import urlencode\nimport sys\nfrom pprint import pprint\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nclass etcdApi():\n\n def __init__(self, url, cert, key, ca):\n self.ETCD_API_URL = url\n self.ETCD_CLIENT_CERT = cert\n self.ETCD_CLIENT_KEY = key\n self.ETCD_CA = ca\n assert self.ETCD_API_URL and self.ETCD_CLIENT_CERT and self.ETCD_CLIENT_KEY and self.ETCD_CA, 'Error, please set variable(ETCD_API_URL, ETCD_CLIENT_CERT, ETCD_CLIENT_KEY, ETCD_CA)'\n\n def request_meta(self, uri, data=None, 
method=None):\n url = '{:s}{:s}'.format(self.ETCD_API_URL, uri)\n print(url)\n context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n context.check_hostname = False\n context.load_cert_chain(\n certfile=self.ETCD_CLIENT_CERT,\n keyfile=self.ETCD_CLIENT_KEY)\n context.load_verify_locations(self.ETCD_CA)\n context.verify_mode = ssl.CERT_REQUIRED\n\n req = Request(url, data=data, unverifiable=True)\n #req.add_header('Content-Type', 'application/json')\n req.add_header('Content-Type', 'application/x-www-form-urlencoded')\n if method:\n req.get_method = lambda: method\n\n try:\n meta = urlopen(req, context=context, timeout=3)\n except:\n meta = None\n pass\n\n return meta\n\n def get(self, key):\n meta = self.request_meta(key)\n return meta\n\n def update(self, key, data):\n meta = self.request_meta(key, data, method='PUT')\n if not meta:\n return None\n res = json.loads(meta.read())\n return meta\n\n def create(self, key, data):\n meta = self.request_meta(key, data, method='PUT')\n if not meta:\n return None\n res = json.loads(meta.read())\n return meta\n\n def delete(self, key):\n meta = self.request_meta(key, method='DELETE')\n if not meta:\n return None\n res = json.loads(meta.read())\n return meta\n\n\nif __name__ == '__main__':\n conf = json.load(open('etcd_api.json'))\n\n url = conf['url']\n cert = conf['client-cert']\n key = conf['client-key']\n ca = conf['ca']\n\n myobj = etcdApi(url, cert, key, ca)\n csvobj = csv.reader(open('template.csv'))\n next(csvobj)\n\n def get_template(csv):\n for item in csv:\n template = item[0]\n icon = item[1]\n res = myobj.get(\n '/DCE/v1/compose-catalog/{}_kubernetes'.format(template))\n print('{} {} {}'.format(template, res.getcode(), res.msg))\n\n def update_template(csv):\n for item in csv:\n template = item[0]\n icon = item[1]\n meta = json.load(\n myobj.get(\n '/DCE/v1/compose-catalog/{}_kubernetes'.format(template)))\n value = meta['node']['value']\n template_dict = json.loads(value)\n template_dict['icon_url'] = icon\n data = urlencode({'prevExist': 'True',\n 'value': json.dumps(template_dict)})\n res = myobj.update(\n '/DCE/v1/compose-catalog/{}_kubernetes'.format(template), data)\n print('{} {} {}'.format(template, res.getcode(), res.msg))\n pprint(res.read())\n\n def create_template():\n meta = json.load(\n myobj.get('/DCE/v1/compose-catalog/template_kubernetes'))\n value = meta['node']['value']\n template_dict = json.loads(value)\n\n from random import choice\n data = []\n with open('/usr/share/dict/words') as fd:\n for _ in xrange(10):\n template = 'app-'+choice(fd.readlines()).strip().lower()\n data.append((template,))\n fd.seek(0)\n print(data)\n data.insert(0, ('template',))\n with open('template_new.csv', 'wb') as wfd:\n csvobj = csv.writer(wfd)\n csvobj.writerows(data)\n\n csvobj = csv.reader(open('template_new.csv'))\n next(csvobj)\n for template in csvobj:\n template_dict['name'] = template\n data = urlencode({'value': json.dumps(template_dict)})\n res = myobj.create(\n '/DCE/v1/compose-catalog/{}_kubernetes'.format(template), data)\n print('{} {} {}'.format(template, res.getcode(), res.msg))\n pprint(res.read())\n\n def delete_template(csv):\n for item in csv:\n template = item[0]\n res = myobj.delete(\n '/DCE/v1/compose-catalog/{}_kubernetes'.format(template))\n print('{} {} {}'.format(template, res.getcode(), res.msg))\n # update_template(csvobj)\n create_template()\n # delete_template(csvobj)\n", "sub_path": "project/haier/dce_template_logo.py", "file_name": "dce_template_logo.py", "file_ext": "py", "file_size_in_byte": 4847, 
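request_meta above wires a client certificate, key, and CA bundle into urllib2 by hand via an SSLContext; with the third-party requests library on Python 3, the same mutual-TLS setup collapses to two arguments. A sketch, where the URL and file paths are hypothetical placeholders:

import requests

resp = requests.get(
    'https://127.0.0.1:2379/v2/keys/DCE/v1/compose-catalog',
    cert=('client.pem', 'client-key.pem'),  # client certificate and key
    verify='ca.pem',                        # CA bundle used to verify the server
    timeout=3,
)
print(resp.status_code)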
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.setdefaultencoding", "line_number": 12, "usage_type": "call"}, {"api_name": "ssl.SSLContext", "line_number": 27, "usage_type": "call"}, {"api_name": "ssl.PROTOCOL_SSLv23", "line_number": 27, "usage_type": "attribute"}, {"api_name": "ssl.CERT_REQUIRED", "line_number": 33, "usage_type": "attribute"}, {"api_name": "urllib2.Request", "line_number": 35, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 57, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 64, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 71, "usage_type": "call"}, {"api_name": "json.load", "line_number": 76, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 84, "usage_type": "call"}, {"api_name": "json.load", "line_number": 99, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 103, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 105, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 106, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 110, "usage_type": "call"}, {"api_name": "json.load", "line_number": 113, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 116, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 122, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 128, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 131, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 135, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 135, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "20464191", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, unicode_literals, print_function)\n\n__all__ = ['Regressor', 'Classifier', 'Layer', 'Convolution']\n\nimport os\nimport sys\nimport math\nimport time\nimport logging\nimport itertools\n\nlog = logging.getLogger('sknn')\n\n\nimport numpy\nimport theano\nimport sklearn.base\nimport sklearn.pipeline\nimport sklearn.preprocessing\nimport sklearn.cross_validation\n\nfrom deepy.dataset import MiniBatches, SequentialDataset\nfrom deepy.networks import NeuralRegressor\nfrom deepy.layers import Dense, Softmax, Dropout\nfrom deepy.trainers import MomentumTrainer, LearningRateAnnealer\n\nfrom ...nn import Layer, Convolution, ansi\nfrom ...nn import NeuralNetwork\n\n\nclass MultiLayerPerceptron(NeuralNetwork):\n \"\"\"\n Abstract base class for wrapping the multi-layer perceptron functionality\n from ``deepy``.\n \"\"\"\n\n def _setup(self):\n self.iterations = 0 \n self.trainer = None\n self.mlp = None\n\n @property\n def is_convolution(self):\n return False\n\n def _create_mlp_trainer(self, dataset):\n # Aggregate all the dropout parameters into shared dictionaries.\n dropout_probs, dropout_scales = {}, {}\n for l in [l for l in self.layers if l.dropout is not None]:\n incl = 1.0 - l.dropout\n dropout_probs[l.name] = incl\n dropout_scales[l.name] = 1.0 / incl\n assert len(dropout_probs) == 0 or self.regularize in ('dropout', None)\n\n if self.regularize == 'dropout' or len(dropout_probs) > 0:\n # Use the globally specified dropout rate when there are no layer-specific ones.\n incl = 1.0 - (self.dropout_rate or 0.5)\n default_prob, default_scale = incl, 1.0 / incl\n self.regularize = 
'dropout'\n \n # Pass all the parameters to pylearn2 as a custom cost function.\n self.cost = dropout.Dropout(\n default_input_include_prob=default_prob,\n default_input_scale=default_scale,\n input_include_probs=dropout_probs, input_scales=dropout_scales)\n\n # Aggregate all regularization parameters into common dictionaries.\n layer_decay = {}\n if self.regularize in ('L1', 'L2') or any(l.weight_decay for l in self.layers):\n wd = self.weight_decay or 0.0001\n for l in self.layers:\n layer_decay[l.name] = l.weight_decay or wd\n assert len(layer_decay) == 0 or self.regularize in ('L1', 'L2', None)\n\n if len(layer_decay) > 0:\n mlp_default_cost = self.mlp.get_default_cost()\n if self.regularize == 'L1':\n l1 = mlp_cost.L1WeightDecay(layer_decay)\n self.cost = cost.SumOfCosts([mlp_default_cost,l1])\n else: # Default is 'L2'.\n self.regularize = 'L2'\n l2 = mlp_cost.WeightDecay(layer_decay)\n self.cost = cost.SumOfCosts([mlp_default_cost,l2])\n\n return self._create_trainer(dataset, self.cost)\n\n def _create_mlp(self):\n model = NeuralRegressor(input_dim=self.unit_counts[0])\n for l, n in zip(self.layers, self.unit_counts[1:]):\n t = 'relu'\n if l.type == 'Rectifier': t = 'relu'\n if l.type == 'Linear': t = 'linear'\n model.stack_layer(Dense(n, t))\n model.stack_layer(Softmax())\n self.mlp = model\n\n def _initialize(self, X, y=None):\n assert not self.is_initialized,\\\n \"This neural network has already been initialized.\"\n self._create_specs(X, y)\n\n self._create_mlp()\n if y is None:\n return\n\n if self.valid_size > 0.0:\n assert self.valid_set is None, \"Can't specify valid_size and valid_set together.\"\n X, X_v, y, y_v = sklearn.cross_validation.train_test_split(\n X, y,\n test_size=self.valid_size,\n random_state=self.random_state)\n self.valid_set = X_v, y_v\n self.train_set = X, y\n \n self.trainer = MomentumTrainer(self.mlp)\n self.controllers = [\n self,\n LearningRateAnnealer(self.trainer, patience=self.n_stable, anneal_times=0)]\n\n def invoke(self):\n \"\"\"Controller interface for deepy's trainer.\n \"\"\"\n self.iterations += 1\n return bool(self.iterations >= self.n_iter)\n\n @property\n def is_initialized(self):\n \"\"\"Check if the neural network was setup already.\n \"\"\"\n return self.trainer is not None\n\n def _reshape(self, X, y=None):\n # TODO: Common for all backends.\n if y is not None and y.ndim == 1:\n y = y.reshape((y.shape[0], 1))\n if self.is_convolution and X.ndim == 3:\n X = X.reshape((X.shape[0], X.shape[1], X.shape[2], 1))\n if self.is_convolution and X.ndim == 2:\n size = math.sqrt(X.shape[1])\n assert size.is_integer(),\\\n \"Input array is not in image shape, and could not assume a square.\"\n X = X.reshape((X.shape[0], int(size), int(size), 1))\n if not self.is_convolution and X.ndim > 2:\n X = X.reshape((X.shape[0], numpy.product(X.shape[1:])))\n return X, y\n\n def _train_impl(self, X, y):\n self.iterations = 0 \n data = zip(X, y)\n self.dataset = SequentialDataset(data)\n minibatches = MiniBatches(self.dataset, batch_size=20)\n self.trainer.run(minibatches, controllers=self.controllers)\n return self\n\n def _predict_impl(self, X):\n return self.mlp.compute(X)\n\n def _mlp_to_array(self):\n return []\n\n def _array_to_mlp(self, array):\n pass\n", "sub_path": "sknn/backend/deepy/mlp.py", "file_name": "mlp.py", "file_ext": "py", "file_size_in_byte": 5741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, 
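The dropout bookkeeping at the top of _create_mlp_trainer above turns a drop probability into an include probability and a compensating scale; a numeric check of why the pair preserves the expected activation magnitude:

# a drop rate of 0.5 keeps half the units and scales survivors by 2x
dropout_rate = 0.5
incl = 1.0 - dropout_rate  # input include probability
scale = 1.0 / incl         # input scale applied to kept units
# expected contribution of a unit: P(kept) * scale = 1, i.e. unchanged
assert incl * scale == 1.0
print(incl, scale)  # 0.5 2.0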
{"api_name": "nn.NeuralNetwork", "line_number": 32, "usage_type": "name"}, {"api_name": "deepy.networks.NeuralRegressor", "line_number": 89, "usage_type": "call"}, {"api_name": "deepy.layers.Dense", "line_number": 94, "usage_type": "call"}, {"api_name": "deepy.layers.Softmax", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.base.cross_validation.train_test_split", "line_number": 109, "usage_type": "call"}, {"api_name": "sklearn.base.cross_validation", "line_number": 109, "usage_type": "attribute"}, {"api_name": "sklearn.base", "line_number": 109, "usage_type": "name"}, {"api_name": "deepy.trainers.MomentumTrainer", "line_number": 116, "usage_type": "call"}, {"api_name": "deepy.trainers.LearningRateAnnealer", "line_number": 119, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.product", "line_number": 145, "usage_type": "call"}, {"api_name": "deepy.dataset.SequentialDataset", "line_number": 151, "usage_type": "call"}, {"api_name": "deepy.dataset.MiniBatches", "line_number": 152, "usage_type": "call"}]} +{"seq_id": "350299934", "text": "# -*- coding: utf-8 -*-\r\nimport sys\r\nimport json\r\nimport time\r\nimport tornado.httpserver\r\nimport tornado.ioloop\r\nimport tornado.options\r\nimport tornado.web\r\nfrom tornado.options import define, options\r\n\r\nfrom Dstorage import D2DB\r\n\r\n\r\nimport intent_similarity\r\n\r\n#define(\"port\", default=9000, help=\"run on the given port\", type=int)\r\nfrom online_search import search_summary\r\nimport interface_v0\r\n\r\n#def warmup():\r\n# my_intent_similarity = intent_similarity.Intent_similarity('sgns.sogou.word', 'sogou_dict.txt')\r\n# my_intent_similarity.build_corpus()\r\n# return my_intent_similarity\r\nfrom warmup import Warmup\r\n#w = Warmup()\r\n#my_intent_similarity_instance = w.warmup()\r\nmy_intent_similarity_instance = Warmup.mis\r\n\r\nimport pattern2redis\r\n\r\n#管理租户相似度阈值\r\nclass RedisHandler(tornado.web.RequestHandler):\r\n def post(self):\r\n redis_op = pattern2redis.RedisOp()\r\n post_message = self.request.body.decode('utf-8')\r\n post_message = json.loads(post_message)\r\n print(post_message)\r\n user_id = post_message['user_id']\r\n threshold = float(post_message['threshold'])\r\n try:\r\n assert isinstance(user_id,str) and (threshold >= 0 and threshold <=1)\r\n flag = post_message['flag']\r\n if flag == 'getList':\r\n respond = redis_op.getListRedis(user_id)\r\n elif flag == 'setList':\r\n respond = redis_op.setListRedis(user_id,threshold)\r\n data = {'user_id':user_id,'respond':respond}\r\n data = json.dumps(data)\r\n self.write(data)\r\n except:\r\n data = {'user_id':user_id,'respond':'operation fail'}\r\n data = json.dumps(data)\r\n self.write(data)\r\n\r\n# Web主方法入口\r\nclass TestHandler_v0(tornado.web.RequestHandler):\r\n \r\n def post(self): \r\n st = time.time()\r\n try: \r\n print(\"---Hello---\") \r\n post_message = self.request.body.decode('utf-8')\r\n print(post_message)\r\n #print(type(post_message)) #str \r\n post_message = json.loads(post_message)\r\n #res_time = time.time()-st\r\n print(post_message)\r\n #print(type(post_message))\r\n user_input1 = str(post_message['user_input'])\r\n user_input2 = str(post_message['user_id'])\r\n user_input3 = str(post_message['num'])\r\n user_input4 = str(post_message['ins_index'])\r\n\r\n inter = interface_v0.Interface()\r\n # user_input:用户的提问问题\r\n # user_id: (暂时不用)\r\n # ins_index:租户的id(影响到后续语料库选择)\r\n # num:删选的判定规则(0表示如果均低于阈值,则不返回,1表示如果均低于阈值,返回相似度最高的一条...)\r\n post_mes = 
{'user_input':user_input1,'user_id':user_input2,'ins_index':user_input4,'num':user_input3}\r\n respond_message,Dsour,RES_TIME_ES,RES_TIME_AIML,RES_TIME_BD, error = inter.parse_respond(post_mes,my_intent_similarity_instance)\r\n\r\n json_dumps = json.dumps(respond_message)\r\n res_time = time.time() - st\r\n user_input = post_message['user_input']\r\n # instance_index\r\n ins_index = post_message['ins_index']\r\n respond_array = respond_message['rep']\r\n SERV_FLAG = 1\r\n D2DB(user_input, ins_index, respond_array, Dsour, res_time, SERV_FLAG, RES_TIME_ES, RES_TIME_AIML,\r\n RES_TIME_BD, error)\r\n\r\n self.write(respond_message) # this is a list-like payload, so the dict inside has to be extracted\r\n\r\n \r\n \r\n except Exception as e: \r\n error = e\r\n user_input = post_message['user_input']\r\n ins_index = post_message['ins_index']\r\n respond_array = [{'a':str(e),'q':''}]\r\n Dsour = None\r\n res_time = time.time()-st\r\n SERV_FLAG = 0\r\n D2DB(user_input,ins_index,respond_array,Dsour,res_time,SERV_FLAG,RES_TIME_ES,RES_TIME_AIML,RES_TIME_BD,error)\r\n\r\n\r\n self.write({\"error_description\": str(error)})\r\n \r\n \r\n\r\n def set_default_headers(self):\r\n\r\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\r\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\r\n self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')\r\n \r\n\r\n# web entry point\r\nif __name__ == \"__main__\":\r\n print(\"---Start Running---\")\r\n tornado.options.parse_command_line()\r\n app = tornado.web.Application(handlers=[(r\"/Bot\", TestHandler_v0)])\r\n http_server = tornado.httpserver.HTTPServer(app) \r\n http_server.listen(options.port) \r\n tornado.ioloop.IOLoop.instance().start()\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "my_faq_server_v0.py", "file_name": "my_faq_server_v0.py", "file_ext": "py", "file_size_in_byte": 4766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "warmup.Warmup.mis", "line_number": 27, "usage_type": "attribute"}, {"api_name": "warmup.Warmup", "line_number": 27, "usage_type": "name"}, {"api_name": "tornado.httpserver.web", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tornado.httpserver", "line_number": 32, "usage_type": "name"}, {"api_name": "pattern2redis.RedisOp", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 36, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 52, "usage_type": "call"}, {"api_name": "tornado.httpserver.web", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tornado.httpserver", "line_number": 56, "usage_type": "name"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 65, "usage_type": "call"}, {"api_name": "interface_v0.Interface", "line_number": 74, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 83, "usage_type": "call"}, {"api_name": "Dstorage.D2DB", "line_number": 89, "usage_type": "call"}, {"api_name": "time.time", "line_number": 103, "usage_type": "call"}, {"api_name": "Dstorage.D2DB", "line_number": 105, "usage_type": "call"}, {"api_name": "tornado.httpserver.options.parse_command_line", "line_number": 122, "usage_type": "call"}, {"api_name": "tornado.httpserver.options", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tornado.httpserver", "line_number": 122, "usage_type": "name"}, 
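A stripped-down tornado handler in the style of TestHandler_v0 above, showing just the JSON-in/JSON-out skeleton (hypothetical sketch; the route and port are placeholders):

import json
import tornado.ioloop
import tornado.web

class EchoHandler(tornado.web.RequestHandler):
    def post(self):
        payload = json.loads(self.request.body.decode('utf-8'))
        self.write({'echo': payload})  # tornado JSON-encodes dict bodies

application = tornado.web.Application(handlers=[(r'/Bot', EchoHandler)])
# application.listen(9000)
# tornado.ioloop.IOLoop.instance().start()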
{"api_name": "tornado.httpserver.web.Application", "line_number": 123, "usage_type": "call"}, {"api_name": "tornado.httpserver.web", "line_number": 123, "usage_type": "attribute"}, {"api_name": "tornado.httpserver", "line_number": 123, "usage_type": "name"}, {"api_name": "tornado.httpserver.httpserver.HTTPServer", "line_number": 124, "usage_type": "call"}, {"api_name": "tornado.httpserver.httpserver", "line_number": 124, "usage_type": "attribute"}, {"api_name": "tornado.httpserver", "line_number": 124, "usage_type": "name"}, {"api_name": "tornado.options.options.port", "line_number": 125, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 125, "usage_type": "name"}, {"api_name": "tornado.httpserver.ioloop.IOLoop.instance", "line_number": 126, "usage_type": "call"}, {"api_name": "tornado.httpserver.ioloop", "line_number": 126, "usage_type": "attribute"}, {"api_name": "tornado.httpserver", "line_number": 126, "usage_type": "name"}]} +{"seq_id": "260543786", "text": "from dataset.box_loader import BBoxLoader\n\nfrom prediction.helper import *\nfrom prediction.model.loss import cal_loss\nfrom prediction.model.model_helper import create_model\n\nvis = False\nconfig = Config()\nrp_index = True\n\n\n@torch.no_grad()\ndef eval():\n model = create_model(config, load_state=True)\n config.batch_size = 1\n config.video_dir_root = config.video_dir_roots[2]\n dataset = BBoxLoader().get_data_loader(shuffle=False)\n\n all_loss = []\n for data in dataset:\n train_x, train_y, path, _ = data\n path = path[-1]\n\n train_x = train_x[:, :, :, :4]\n if rp_index:\n x = rp_index_np(train_x)\n else:\n x = train_x.numpy()\n\n y = remove_zero_bbox(x[:, -1, ...])\n x = remove_zero_bbox(x[:, :-1, ...])\n\n x = torch.from_numpy(x).float()\n y = torch.from_numpy(y).float()\n if config.cuda:\n x = x.cuda()\n y = y.cuda()\n\n output = model(x)\n\n loss, metrics = cal_loss(output, y)\n all_loss.append(loss.item())\n\n if vis:\n output = remove_zero_bbox(output)\n img = get_image(path)\n output = rescale(output, img.shape)\n\n target = remove_zero_bbox(y)\n target = rescale(target, img.shape)\n\n cv2.imshow('pred', render_bbox(output, img))\n cv2.imshow('gt', render_bbox(target, img))\n cv2.waitKey(100000)\n\n avg = sum(all_loss) / len(all_loss)\n print('avg loss', avg)\n\n\ndef write(path, loss):\n with open(path, 'w') as f:\n for l in loss:\n f.write('%s\\n' % l)\n\n\nif __name__ == '__main__':\n eval()\n", "sub_path": "python/prediction/eval.py", "file_name": "eval.py", "file_ext": "py", "file_size_in_byte": 1645, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "prediction.model.model_helper.create_model", "line_number": 14, "usage_type": "call"}, {"api_name": "dataset.box_loader", "line_number": 17, "usage_type": "name"}, {"api_name": "dataset.box_loader.BBoxLoader", "line_number": 17, "usage_type": "call"}, {"api_name": "dataset.box_loader", "line_number": 20, "usage_type": "name"}, {"api_name": "prediction.model.loss.cal_loss", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "605788078", "text": "import requests\nfrom jsonschema import validate\n\nurl = \"https://dog.ceo/api/breeds/list/random\"\nproxy = {\"https\": \"localhost:8080\"}\n\ndef test_structure_of_schema():\n \"\"\" Проверка структуры ответа на запрос /breeds/list/random \"\"\"\n response = requests.get(url, proxies=proxy, verify=False)\n schema = {\n \"message\": \"object\",\n \"status\": \"success\"\n }\n validate(instance=response.json(), 
schema=schema)\n", "sub_path": "PythonQAOtus_Lesson28/Test_REST_API_1/test_3_API_1.py", "file_name": "test_3_API_1.py", "file_ext": "py", "file_size_in_byte": 458, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "jsonschema.validate", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "453876462", "text": "import cv2\nimport numpy as np\nimport skimage.measure\n\nimg = cv2.imread('two.jpg', 0)\nkernel2 = np.array([[1, 0, -1],\n [0, 0, 0],\n [-1, 0, 1]])\ncov = cv2.filter2D(img, -1, kernel2)\npool = skimage.measure.block_reduce(cov, (2, 2), np.max)\ncv2.imshow('image', cov)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "sub_path": "try/pool1.py", "file_name": "pool1.py", "file_ext": "py", "file_size_in_byte": 339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 9, "usage_type": "call"}, {"api_name": "skimage.measure.measure.block_reduce", "line_number": 10, "usage_type": "call"}, {"api_name": "skimage.measure.measure", "line_number": 10, "usage_type": "attribute"}, {"api_name": "skimage.measure", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "237302177", "text": "import pygame\nimport random \nimport math\npygame.init()\n\n# screen\nscreen = pygame.display.set_mode((1000,800))\n\n# tilte and icon\npygame.display.set_caption(\"Space Invaders\")\nicon = pygame.image.load(\"icon.png\")\npygame.display.set_icon(icon)\n \n# background\nbackground = pygame.image.load(\"background.jpg\")\n\n# score\nscoreCount = 0\nfont = pygame.font.Font(\"freesansbold.ttf\",32)\nscoreX = 10\nscoreY = 10\n\ndef score(x,y):\n sc = font.render(\"Score : \" + str(scoreCount), True, (255,255,255))\n screen.blit(sc,(x,y))\n\n# player\nplayerImage = pygame.image.load(\"player.png\")\nplayerX = 468\nplayerY = 720\nplayerDelta = 0\nplayerDeltaChange = 0.8\n\n# enemy\nenemyNumber = 5\nenemyImage = []\nenemyX = []\nenemyY = []\nenemyDelta = 0.25\nenemyState = []\n\nfor i in range(enemyNumber):\n enemyImage.append(pygame.image.load(\"enemy.png\"))\n enemyX.append(0)\n enemyY.append(-100)\n enemyState.append(\"Ready\")\n\n# bullet\nbulletImage = pygame.image.load(\"bullet.png\")\nbulletX = -100\nbulletY = -32\nbulletDelta = 1\nbulletState = \"Ready\"\n\ndef player(x,y):\n screen.blit(playerImage,(x,y))\n\ndef enemy(x,y,i):\n screen.blit(enemyImage[i],(x[i],y[i]))\n\ndef bullet(x,y):\n screen.blit(bulletImage,(x,y))\n\ndef shoot(x,y):\n global bulletState, bulletX, bulletY\n bulletState = \"Going\"\n bulletX = x + 16\n bulletY = y - 32\n\ndef isCollide_forBullet(eneX,eneY,bullX,bullY):\n if bullY + 32 < eneY or eneY + 64 < bullY :\n return False\n if bullX + 32 < eneX or eneX + 64 < bullX :\n return False\n return True \n\ndef isCollide_forPlayer(eneX,eneY,playX,playY):\n if eneY + 64 < playY or playY + 64 < eneY:\n return False\n if eneX + 64 < playX or playX + 64 < eneX:\n return False\n return True\n\ndef resetBullet():\n global 
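pool1.py above uses skimage.measure.block_reduce as a 2x2 max-pool over the filtered image; a tiny numeric example makes the block semantics concrete (illustrative only):

import numpy as np
import skimage.measure

a = np.arange(16).reshape(4, 4)
# each non-overlapping 2x2 block collapses to its maximum
print(skimage.measure.block_reduce(a, (2, 2), np.max))
# [[ 5  7]
#  [13 15]]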
bulletX,bulletY,bulletState\n bulletX = -100\n bulletY = -32\n bulletState = \"Ready\"\n\n# main loop\nrunning = True\nwhile running:\n# background\n screen.fill((0,0,0))\n screen.blit(background,(0,0))\n# quit\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n# player movement\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n playerDelta = - playerDeltaChange\n if event.key == pygame.K_RIGHT:\n playerDelta = playerDeltaChange\n \n if event.key == pygame.K_SPACE:\n if bulletState == \"Ready\":\n shoot(playerX,playerY)\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT: \n playerDelta = 0 \n\n playerX += playerDelta\n\n# border\n if playerX < 0:\n playerX = 0\n if playerX > 936:\n playerX = 936\n\n# enemy spawn\n if (random.randint(0,3000) == 1):\n for i in range(enemyNumber):\n if enemyState[i] == \"Ready\":\n enemyState[i] = \"Going\"\n enemyX[i] = random.randint(0,936)\n enemyY[i] = 0\n break\n \n# bullet despawn\n if bulletY < 0 and bulletState == \"Going\":\n resetBullet()\n\n# enemy movement \n for i in range(enemyNumber):\n if enemyState[i] == \"Going\":\n enemyY[i] += enemyDelta\n \n for i in range(enemyNumber):\n if enemyY[i] > 800:\n enemyY[i] = -100\n enemyState[i] = \"Ready\"\n\n# bullet movement\n if (bulletState == \"Going\"):\n bulletY -= bulletDelta\n\n# enemy hit\n for i in range(enemyNumber):\n if isCollide_forBullet(enemyX[i],enemyY[i],bulletX,bulletY):\n enemyY[i] = -100\n enemyState[i] = \"Ready\"\n resetBullet()\n scoreCount += 1\n \n# player hit\n for i in range(enemyNumber):\n if isCollide_forPlayer(enemyX[i],enemyY[i],playerX,playerY):\n playerX = -64\n playerY = -64\n\n for i in range(enemyNumber):\n enemy(enemyX,enemyY,i) \n bullet(bulletX,bulletY)\n player(playerX,playerY)\n\n score(scoreX,scoreY)\n pygame.display.update()\n\n", "sub_path": "SpaceInvader.py", "file_name": "SpaceInvader.py", "file_ext": "py", "file_size_in_byte": 4035, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pygame.init", "line_number": 4, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 49, "usage_type": 
"attribute"}, {"api_name": "pygame.event.get", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 112, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 124, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 170, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 170, "usage_type": "attribute"}]} +{"seq_id": "154664367", "text": "# This file is associated with the book\n# \"Machine Learning Refined\", Cambridge University Press, 2016.\n# by Jeremy Watt, Reza Borhani, and Aggelos Katsaggelos.\n\nimport numpy as np\nimport numpy.matlib\nimport matplotlib.pyplot as plt\nfrom __future__ import division\n\n### load data\ndef load_data(csvname):\n data = np.array(np.genfromtxt(csvname, delimiter=','))\n X = data[:,0:-1]\n y = data[:,-1]\n y = np.reshape(y,(np.size(y),1))\n return X,y\n\n# avoid overflow when using exp - just cutoff after arguments get too large/small\ndef my_exp(u):\n s = np.argwhere(u > 100)\n t = np.argwhere(u < -100)\n u[s] = 0\n u[t] = 0\n u = np.exp(u)\n u[t] = 1\n return u\n\n# sigmoid function for writing things more compactly\ndef sigmoid(z):\n return 1/(1+my_exp(-z))\n\n# compute the current value of the cost function\ndef compute_cost(z,H,A):\n F = np.zeros((M,np.shape(A)[1]))\n for p in np.arange(0,np.shape(A)[1]):\n F[:,p] = np.ravel(np.tanh(z + np.dot(H.T,np.reshape(A[:,p],(np.shape(A)[0],1)))))\n return F\n\n# gradient descent for single layer tanh nn \ndef gradient_descent(X,y,M):\n # initializations\n N = np.shape(X)[0]\n P = np.shape(X)[1]\n\n b = np.random.randn()\n w = np.random.randn(M,1)\n c = np.random.randn(M,1)\n V = np.random.randn(N,M)\n l_P = np.ones((P,1))\n\n # stoppers\n max_its = 10000\n\n ### main ###\n for k in range(max_its):\n\n F = compute_cost(c,V,X)\n\n # calculate gradients\n q = sigmoid(-y*(b + np.dot(F.T,w)))\n grad_b = - np.dot(l_P.T,(q*y))\n grad_w = np.zeros((M,1))\n grad_c = np.zeros((M,1))\n grad_V = np.zeros((N,M))\n\n for n in np.arange(0,M):\n t = np.tanh(c[n] + np.dot(X.T,V[:,n]))\n t = np.reshape(t,(np.size(t),1))\n s = (1/np.cosh(c[n] + np.dot(X.T,V[:,n])))**2\n s = np.reshape(s,(np.size(s),1))\n grad_w[n] = - np.dot(l_P.T,(q*t*y))\n grad_c[n] = - np.dot(l_P.T,(q*s*y)*w[n])\n grad_V[:,n] = - np.ravel(np.dot(X,(q*s*y)*w[n]))\n\n # determine steplength\n alpha = 1e-2\n\n # take gradient steps\n b = b - alpha*grad_b\n w = w - alpha*grad_w\n c = c - alpha*grad_c\n V = V - alpha*grad_V\n\n return b, w, c, V\n\n# plot points\ndef plot_points(X,y):\n ind = np.nonzero(y==1)[0]\n plt.plot(X[ind,0],X[ind,1],'ro')\n ind = np.nonzero(y==-1)[0]\n plt.plot(X[ind,0],X[ind,1],'bo')\n plt.hold(True)\n\n# plot the seprator + surface\ndef plot_separator(b,w,c,V,X,y):\n s = np.arange(-1,1,.01)\n s1, s2 = np.meshgrid(s,s)\n\n s1 = np.reshape(s1,(np.size(s1),1))\n 
s2 = np.reshape(s2,(np.size(s2),1))\n g = np.zeros((np.size(s1),1))\n\n t = np.zeros((2,1))\n for i in np.arange(0,np.size(s1)):\n t[0] = s1[i]\n t[1] = s2[i]\n F = compute_cost(c,V,t)\n g[i] = np.tanh(b + np.dot(F.T,w))\n\n s1 = np.reshape(s1,(np.size(s),np.size(s)))\n s2 = np.reshape(s2,(np.size(s),np.size(s)))\n g = np.reshape(g,(np.size(s),np.size(s)))\n\n # plot contour in original space\n plt.contour(s1,s2,g,1,color = 'k')\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.xlim(0,1)\n plt.ylim(0,1)\n plt.hold(True)\n \n# load data\nX, y = load_data('genreg_data.csv')\nM = 4 # number of basis functions to use / hidden units\n\n# perform gradient descent to fit tanh basis sum\nb,w,c,V = gradient_descent(X.T,y,M)\n\n# plot resulting fit\nfig = plt.figure(facecolor = 'white',figsize = (4,4))\nplot_points(X,y)\nplot_separator(b,w,c,V,X,y)\nplt.show()", "sub_path": "mlrefined_hw_solutions_backup/mlrefined_hw_coding_solutions/Python2_solutions/Chapter_6/Exercise_6_5/exercise_6_5_solution.py", "file_name": "exercise_6_5_solution.py", "file_ext": "py", "file_size_in_byte": 3606, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 64, 
"usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.cosh", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "numpy.nonzero", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hold", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contour", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.NullLocator", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.NullLocator", "line_number": 117, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot.xlim", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hold", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}]} +{"seq_id": "159740416", "text": "import logging\nfrom .UI_Abstract_Controls import UI_Abstract_Controls\n\n\nclass UI_Status_Controls(UI_Abstract_Controls):\n def __init__(self, app):\n super().__init__(app)\n\n def bind_controls(self):\n self.logger.debug('Binding status controls')\n self.app.parser.gotStatus.connect(\n lambda _, __, axis_1_position, axis_1_speed,\n ___, axis_2_position, axis_2_speed:\n self.update_status(axis_1_position, axis_1_speed, axis_2_position, axis_2_speed))\n\n def update_status(self, axis_1_position, axis_1_speed, axis_2_position, axis_2_speed):\n status_logger = logging.getLogger('status')\n status_logger.info(f'{axis_1_position}; {axis_2_position}; {axis_1_speed}; {axis_2_speed}')\n self.app.axis_1_position_display.display(axis_1_position)\n self.app.axis_1_speed_display.display(axis_1_speed)\n\n self.app.axis_2_position_display.display(axis_2_position)\n self.app.axis_2_speed_display.display(axis_2_speed)\n", "sub_path": "UI/UI_Status_Controls.py", "file_name": "UI_Status_Controls.py", "file_ext": "py", "file_size_in_byte": 1008, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "UI_Abstract_Controls.UI_Abstract_Controls", "line_number": 5, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "517047613", "text": "import math\n\nfrom sklearn.linear_model import LogisticRegression\n\nfrom core.helpers import *\n\n\nclass LogisticRegressor:\n def __init__(self, sales_generator, observations_count):\n self.sales_generator = sales_generator\n self.observations_count = observations_count\n\n self.products = range(2)\n self.observations = range(self.observations_count)\n\n def train(self, coefficients=None):\n # Calculate sale probabilities\n prices, competitor_prices, ranks, sale_probs = self.sales_generator.generate(self.observations_count, coefficients)\n\n # Run regression\n explanatory_vars = [self.get_explanatory_vars(i, ranks, prices, competitor_prices) for i in self.products]\n coeffs = [self.fit_model(explanatory_vars[i], sale_probs[i]) for i in self.products]\n\n return coeffs\n\n def train_iteratively(self, coefficients=None):\n # Calculate sale probabilities\n prices, competitor_prices, ranks, sale_probs = self.sales_generator.generate(self.observations_count, coefficients)\n\n # Determine lower observations count bound depending on generated situation\n min_observations = 2\n max_observations = self.observations_count\n for k in range(min_observations, max_observations + 1):\n if all([len(np.unique(sale_probs[i][:k])) == 2 for i in self.products]):\n min_observations = k\n break\n\n # Run regressions\n observations_count = 
max_observations - min_observations + 1\n explanatory_vars = [self.get_explanatory_vars(i, ranks, prices, competitor_prices) for i in self.products]\n coeffs = np.empty(shape=(observations_count, 2, len(explanatory_vars[0])))\n for k in range(min_observations, max_observations + 1):\n coeffs[k - min_observations] = [self.fit_model(explanatory_vars[i], sale_probs[i], k) for i in self.products]\n\n sale_probs = np.swapaxes(sale_probs, 0, 1)\n coeffs = np.swapaxes(np.swapaxes(coeffs, 0, 1), 1, 2).tolist()\n\n return coeffs, prices, competitor_prices, sale_probs, min_observations, max_observations\n\n def get_explanatory_vars(self, product, ranks, prices, competitor_prices):\n explanatory_1 = [1] * self.observations_count\n explanatory_2 = [ranks[k, product] for k in self.observations]\n explanatory_3 = [prices[k, product] - np.min(competitor_prices[k]) for k in self.observations]\n explanatory_4 = [prices[k, product] - min(prices[k, i] for i in self.products) for k in self.observations]\n explanatory_5 = list(map(lambda x: math.pow(x, 2), explanatory_4))\n\n return np.matrix([explanatory_1, explanatory_2, explanatory_3, explanatory_4, explanatory_5])\n\n def fit_model(self, explanatory_vars, sale_probs, n=None):\n if n is None:\n n = len(sale_probs)\n\n regressor = LogisticRegression(fit_intercept=False)\n model = regressor.fit(explanatory_vars.transpose()[:n], sale_probs[:n])\n coeffs = model.coef_[0].tolist()\n\n return coeffs\n", "sub_path": "core/regression.py", "file_name": "regression.py", "file_ext": "py", "file_size_in_byte": 3049, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "math.pow", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "373571715", "text": "import csv\nimport time\nimport unittest\n\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\n\nfrom Data.parameters import Data\nfrom TS.arg import arg\nfrom TS.reuse_func import cqube\nfrom get_dir import pwd\n\n\nclass Blockdata_validation(unittest.TestCase):\n def setUp(self):\n driver_path = pwd()\n self.driver = webdriver.Chrome(driver_path.get_driver_path())\n driver = cqube(self.driver)\n driver.open_cqube_appln()\n driver = cqube(self.driver)\n driver.login_cqube()\n driver.navigate_to_student_report()\n time.sleep(5)\n\n def test_validate_schoolrecords(self):\n distoption = self.driver.find_elements_by_xpath(Data.Dnames)\n blockoption = self.driver.find_elements_by_xpath(Data.Bnames)\n clusteroption = self.driver.find_elements_by_xpath(Data.cnames)\n for i in range(len(distoption)):\n distoption[i].click()\n for j in range(len(blockoption)):\n blockoption[j].click()\n for k in range(len(clusteroption)):\n clusteroption[k].click()\n\n lists = self.driver.find_elements_by_class_name(Data.dots)\n count = len(lists)-1\n self.assertNotEqual(0,count,msg=\"Block Does not contains Data\")\n\n def tearDown(self):\n self.driver.close()\n\nif __name__ == \"__main__\":\n unittest.main()", "sub_path": "cQube_Testing/cQube_Components/SAR/Click_on_District_Block_clusters.py", "file_name": "Click_on_District_Block_clusters.py", "file_ext": "py", "file_size_in_byte": 1397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "unittest.TestCase", "line_number": 14, "usage_type": "attribute"}, {"api_name": "get_dir.pwd", "line_number": 16, "usage_type": 
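fit_model in regression.py above fits with fit_intercept=False because the first explanatory row is already a constant term; a self-contained toy reproduction of that setup (made-up data):

import numpy as np
from sklearn.linear_model import LogisticRegression

explanatory_vars = np.array([
    [1, 1, 1, 1, 1, 1],              # constant row replaces the intercept
    [0.2, 0.9, 0.4, 0.8, 0.1, 0.7],  # e.g. a rank- or price-style feature
])
sale_probs = np.array([1, 0, 1, 0, 1, 0])

# observations are columns in the explanatory matrix, hence the transpose
model = LogisticRegression(fit_intercept=False).fit(explanatory_vars.T, sale_probs)
print(model.coef_[0])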
"call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 17, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 17, "usage_type": "name"}, {"api_name": "TS.reuse_func.cqube", "line_number": 18, "usage_type": "call"}, {"api_name": "TS.reuse_func.cqube", "line_number": 20, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "Data.parameters.Data.Dnames", "line_number": 26, "usage_type": "attribute"}, {"api_name": "Data.parameters.Data", "line_number": 26, "usage_type": "name"}, {"api_name": "Data.parameters.Data.Bnames", "line_number": 27, "usage_type": "attribute"}, {"api_name": "Data.parameters.Data", "line_number": 27, "usage_type": "name"}, {"api_name": "Data.parameters.Data.cnames", "line_number": 28, "usage_type": "attribute"}, {"api_name": "Data.parameters.Data", "line_number": 28, "usage_type": "name"}, {"api_name": "Data.parameters.Data.dots", "line_number": 36, "usage_type": "attribute"}, {"api_name": "Data.parameters.Data", "line_number": 36, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "51061939", "text": "from __future__ import division, print_function, absolute_import\nimport os\nimport argparse\nimport datetime\nimport matplotlib\nimport numpy as np\nimport scipy.io\nimport tensorflow as tf\n\t\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nparser = argparse.ArgumentParser(description = 'aeImpute')\n\nparser.add_argument('--debug', type = str, nargs = '+', help = \"Want debug statements.\", choices = ['yes', 'no'])\n\nargs = parser.parse_args()\n\ninitTime1 = datetime.datetime.now()\n\nuserItemMatrix = scipy.io.mmread(\"../../fully_proessed_data/mouse_brain/whole_matrix_ip2matlab_mouseBrain\")\n\nuserItemMatrix = userItemMatrix.toarray()\nuserItemMatrix = np.array(userItemMatrix)\nnum_features = userItemMatrix.shape[1]\ndata_entries = userItemMatrix.shape[0]\nprint(num_features)\nX = tf.placeholder(\"float32\", [None, num_features])\nmask = tf.placeholder(\"float32\", [None, num_features])\n\n\nmatrix_mask = userItemMatrix.copy()\nmatrix_mask[matrix_mask.nonzero()] = 1 \n\nlearning_rate = 1e-4\nnum_hidden_1 = [7000]\nlambda_list = [1]\n\nfor hidden_layer_1 in num_hidden_1:\n\tfor lambda_val in lambda_list:\n\t\tprint(\"lambda : \" + str(lambda_val))\n\t\tprint(\"hidden layer : \" + str(hidden_layer_1))\n\t\tweights = {\n\t\t\t'encoder_h1': tf.Variable(tf.random_normal([num_features, hidden_layer_1])),\n\t\t\t'decoder_h1': tf.Variable(tf.random_normal([hidden_layer_1, num_features])),\n\t\t\t}\n\t\tbiases = {\n\t\t\t'encoder_b1': tf.Variable(tf.random_normal([hidden_layer_1])),\n\t\t\t'decoder_b1': tf.Variable(tf.random_normal([num_features])),\n\t\t}\n\t\t\n\t\tdef encoder(x):\n\t\t\tlayer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))\n\t\t\treturn layer_1\n\n\t\tdef decoder(x):\n\t\t\tlayer_1 = tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1'])\n\t\t\treturn layer_1\n\t\t\n\t\tencoder_op = encoder(X)\n\t\tdecoder_op = decoder(encoder_op)\n\n\t\ty_pred = decoder_op\n\t\ty_true = X\t\t\n\t\trmse_loss = tf.pow(tf.norm(y_true - y_pred * mask), 2)\n\t\tregularization = tf.multiply(tf.constant(lambda_val/2.0, dtype=\"float32\"), tf.add(tf.pow(tf.norm(weights['decoder_h1']), 2), tf.pow(tf.norm(weights['encoder_h1']), 2)))\n\t\tloss = tf.add(tf.reduce_mean(rmse_loss), 
regularization)\n\t\toptimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)\n\t\t# optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(rmse_loss)\n\n\t\tdisplay_step = 1\n\n\t\tinit = tf.global_variables_initializer()\n\t\twith tf.Session() as sess:\n\t\t\tsess.run(init)\n\t\t\tnum_steps = 6000\n\t\t\tprev_loss = 0\n\t\t\tthreshold = 1e-5\n\t\t\tfor k in range(1, num_steps+1):\n\t\t\t\t_, l = sess.run([optimizer, rmse_loss], feed_dict={X: userItemMatrix, mask: matrix_mask})\n\t\t\t\tlpentry = l/data_entries\n\t\t\t\tprint (lpentry)\n\t\t\t\tchange = abs(prev_loss - lpentry)\n\t\t\t\tif ( change <= threshold):\n\t\t\t\t\tprint(\"Reached the threshold value.\")\n\t\t\t\t\tbreak\n\t\t\t\tprev_loss = lpentry\n\t\t\t\tif( args.debug[0] == 'yes' ):\n\t\t\t\t\tif k % display_step == 0 or k == 1:\n\t\t\t\t\t\tprint('Step %i : Loss: %f <-> %f, %f' % (k, l, lpentry, change))\n\t\t\ttrained_data = sess.run([y_pred], feed_dict={X: userItemMatrix, mask: matrix_mask})\n\t\t\tscipy.io.savemat(\"mouseBrain_\" + str(lambda_val) + \"_\" + str(hidden_layer_1) + \".mat\", mdict = {\"arr\" : trained_data})\n\ninitTime2 = datetime.datetime.now()\nprint(\"Total time taken = {0}\".format(initTime2 - initTime1))\n", "sub_path": "Autoencoder Scripts/Imputation/autoencoder-mouse_brain.py", "file_name": "autoencoder-mouse_brain.py", "file_ext": "py", "file_size_in_byte": 3316, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "matplotlib.use", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "scipy.io.io.mmread", "line_number": 25, "usage_type": "call"}, {"api_name": "scipy.io.io", "line_number": 25, "usage_type": "attribute"}, {"api_name": "scipy.io", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.add", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.pow", 
"line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.norm", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.multiply", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.pow", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.norm", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.train.RMSPropOptimizer", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 78, "usage_type": "call"}, {"api_name": "scipy.io.io.savemat", "line_number": 96, "usage_type": "call"}, {"api_name": "scipy.io.io", "line_number": 96, "usage_type": "attribute"}, {"api_name": "scipy.io", "line_number": 96, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "attribute"}]} +{"seq_id": "391209385", "text": "import pymysql\nimport time\nimport json\n\n\n# change the information in database.json\nf = open(\"database.json\", 'r')\nline = f.readline()\ndatabaseInfo = json.loads(line)\nf.close()\n\n\n# database config\nDB_HOST = databaseInfo['DB_HOST']\nDB_USER = databaseInfo['DB_USER']\nDB_PASSWORD = databaseInfo['DB_PASSWORD']\nDB_NAME = databaseInfo['DB_NAME']\n\n\ndef databaseConnect():\n conn = pymysql.connect(\n host=DB_HOST,\n user=DB_USER,\n password=DB_PASSWORD,\n db=DB_NAME,\n charset='utf8')\n return conn\n\n\ndef databaseClose(conn):\n conn.close()\n\n\n# 데이터베이스의 데이터를 초기의 상태로 돌립니다\ndef initData(conn):\n curs = conn.cursor()\n sql = '''\n delete from tbl_problem_n1;\n '''\n curs.execute(sql)\n sql = '''\n delete from tbl_problem_n2;\n '''\n curs.execute(sql)\n sql = '''\n delete from tbl_problem_n3;\n '''\n curs.execute(sql)\n sql = '''\n delete from tbl_problem_n4;\n '''\n curs.execute(sql)\n sql = '''\n delete from tbl_problem_n5;\n '''\n curs.execute(sql)\n sql = '''\n delete from tbl_problem_free;\n '''\n curs.execute(sql)\n sql = '''\n update tbl_japan_store\n set check_each_yn = 'N', check_free_yn = 'N';\n '''\n curs.execute(sql)\n conn.commit()\n\n\n# 봇이 문제를 풀어줍니다\ndef botSolveProblem(conn, level):\n curs = conn.cursor()\n sql = '''\n update tbl_problem_n1\n set user_id = 1, modify_date = now()\n where user_id is null;\n '''\n curs.execute(sql)\n conn.commit()\n\n\nif __name__ == \"__main__\":\n conn = databaseConnect()\n \"\"\"\n # Sample Code -----------------\n initData(conn)\n botSolveProblem(conn, 'n1')\n botSolveProblem(conn, 'n2')\n botSolveProblem(conn, 'n3')\n botSolveProblem(conn, 'n4')\n botSolveProblem(conn, 'n5')\n \"\"\"\n\n REPEAT_SEC = 3\n while(True):\n botSolveProblem(conn, 'n1')\n print('INFO -> Bot solved the quiz')\n time.sleep(REPEAT_SEC)\n\n databaseClose(conn)\n", "sub_path": "app_daemon/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2056, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "json.loads", "line_number": 9, "usage_type": "call"}, {"api_name": "pymysql.connect", 
"line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "114844156", "text": "# -*- coding: utf-8 -*-\n\n# Copyright 2010-2011 OpenStack Foundation\n# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport os.path\nimport urllib\nfrom wsgiref.util import setup_testing_defaults\n\nimport fixtures\nimport swiftclient\nimport testtools\n\nimport os_loganalyze.wsgi as log_wsgi\n\n\n_TRUE_VALUES = ('true', '1', 'yes')\n\n\ndef samples_path(append_folder='samples'):\n \"\"\"Create an abs path for our test samples\n\n Because the wsgi has a security check that ensures that we don't\n escape our root path, we need to actually create a full abs path\n for the tests, otherwise the sample files aren't findable.\n \"\"\"\n return (os.path.normpath(\n os.path.join(os.getcwd(), 'os_loganalyze/tests', append_folder)) +\n os.sep)\n\n\nclass TestCase(testtools.TestCase):\n\n \"\"\"Test case base class for all unit tests.\"\"\"\n\n def setUp(self):\n \"\"\"Run before each test method to initialize test environment.\"\"\"\n\n super(TestCase, self).setUp()\n test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)\n try:\n test_timeout = int(test_timeout)\n except ValueError:\n # If timeout value is invalid do not set a timeout.\n test_timeout = 0\n if test_timeout > 0:\n self.useFixture(fixtures.Timeout(test_timeout, gentle=True))\n\n self.useFixture(fixtures.NestedTempfile())\n self.useFixture(fixtures.TempHomeDir())\n\n if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:\n stdout = self.useFixture(fixtures.StringStream('stdout')).stream\n self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))\n if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:\n stderr = self.useFixture(fixtures.StringStream('stderr')).stream\n self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))\n\n self.log_fixture = self.useFixture(fixtures.FakeLogger())\n self.samples_directory = 'samples'\n self.wsgi_config_file = samples_path('samples') + 'wsgi.conf'\n\n def _start_response(self, *args):\n return\n\n def fake_env(self, **kwargs):\n environ = dict(**kwargs)\n setup_testing_defaults(environ)\n return environ\n\n def get_generator(self, fname, level=None, html=True,\n limit=None, source=None):\n kwargs = {'PATH_INFO': '/htmlify/%s' % fname}\n qs = {}\n if level:\n qs['level'] = level\n if limit:\n qs['limit'] = limit\n if source:\n qs['source'] = source\n if qs:\n kwargs['QUERY_STRING'] = urllib.urlencode(qs)\n\n if html:\n kwargs['HTTP_ACCEPT'] = 'text/html'\n\n gen = log_wsgi.application(\n self.fake_env(**kwargs),\n self._start_response,\n root_path=samples_path(self.samples_directory),\n wsgi_config=self.wsgi_config_file)\n\n return iter(gen)\n\n\nclass TestSwiftFiles(TestCase):\n\n \"\"\"Test case with fake swift object.\"\"\"\n\n def setUp(self):\n def fake_get_object(self, container, name, resp_chunk_size=None):\n if resp_chunk_size:\n\n def _object_body():\n with 
open(samples_path('samples') + name) as f:\n\n buf = f.read(resp_chunk_size)\n while buf:\n yield buf\n buf = f.read(resp_chunk_size)\n\n object_body = _object_body()\n else:\n with open(samples_path('samples') + name) as f:\n object_body = f.read()\n return [], object_body\n\n swiftclient.client.Connection.get_object = fake_get_object\n super(TestSwiftFiles, self).setUp()\n\n # Set the samples directory to somewhere non-existent so that swift\n # is checked for files\n self.samples_directory = 'non-existent'\n", "sub_path": "os_loganalyze/tests/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 4458, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.normpath", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 41, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 42, "usage_type": "attribute"}, {"api_name": "testtools.TestCase", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 53, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 53, "usage_type": "attribute"}, {"api_name": "fixtures.Timeout", "line_number": 60, "usage_type": "call"}, {"api_name": "fixtures.NestedTempfile", "line_number": 62, "usage_type": "call"}, {"api_name": "fixtures.TempHomeDir", "line_number": 63, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 65, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 65, "usage_type": "attribute"}, {"api_name": "fixtures.StringStream", "line_number": 66, "usage_type": "call"}, {"api_name": "fixtures.MonkeyPatch", "line_number": 67, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 68, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 68, "usage_type": "attribute"}, {"api_name": "fixtures.StringStream", "line_number": 69, "usage_type": "call"}, {"api_name": "fixtures.MonkeyPatch", "line_number": 70, "usage_type": "call"}, {"api_name": "fixtures.FakeLogger", "line_number": 72, "usage_type": "call"}, {"api_name": "wsgiref.util.setup_testing_defaults", "line_number": 81, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 95, "usage_type": "call"}, {"api_name": "os_loganalyze.wsgi.application", "line_number": 100, "usage_type": "call"}, {"api_name": "os_loganalyze.wsgi", "line_number": 100, "usage_type": "name"}, {"api_name": "swiftclient.client", "line_number": 131, "usage_type": "attribute"}]} +{"seq_id": "547446257", "text": "from controllers.tag import parse_tag_string\nimport json\nfrom database.influxdb import db\n\nfrom .default import DefaultRequestHandler\n\n\nclass SeriesHandler(DefaultRequestHandler):\n async def get(self, database: str):\n measurement = self.get_argument('measurement', None)\n tags_string = self.get_argument('tags', None)\n\n tags = {}\n if tags_string is not None:\n tags = parse_tag_string(tags_string)\n\n res = db.get_list_series(database=database,\n measurement=measurement,\n tags=tags)\n self.write(json.dumps(res))\n", "sub_path": "controllers/series.py", "file_name": "series.py", "file_ext": "py", "file_size_in_byte": 630, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": 
"default.DefaultRequestHandler", "line_number": 8, "usage_type": "name"}, {"api_name": "controllers.tag.parse_tag_string", "line_number": 15, "usage_type": "call"}, {"api_name": "database.influxdb.db.get_list_series", "line_number": 17, "usage_type": "call"}, {"api_name": "database.influxdb.db", "line_number": 17, "usage_type": "name"}, {"api_name": "database.influxdb", "line_number": 17, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "92063118", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 5 20:18:41 2020\r\n\r\n@author: dhrup\r\n\"\"\"\r\n\r\n\r\nimport pandas as pd\r\nimport tweepy\r\nimport Tweet_cred as tc\r\nimport re\r\n\r\n\r\nalltweets = []\t\r\n\r\ndef get_all_tweets(screen_name):\r\n auth = tweepy.OAuthHandler(tc.consumer_key,tc.consumer_secret)\r\n auth.set_access_token(tc.access_key, tc.access_secret)\r\n api = tweepy.API(auth)\r\n new_tweets = api.user_timeline(screen_name = screen_name,count=200)\r\n alltweets.extend(new_tweets)\r\n \r\n oldest = alltweets[-1].id - 1\r\n while len(new_tweets)>0:\r\n new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)\r\n #save most recent tweets\r\n alltweets.extend(new_tweets)\r\n #update the id of the oldest tweet less one\r\n oldest = alltweets[-1].id - 1\r\n print (\"...%s tweets downloaded so far\" % (len(alltweets))) \r\n \r\n outtweets = [[tweet.created_at,tweet.entities[\"hashtags\"],tweet.entities[\"user_mentions\"],tweet.favorite_count,\r\n tweet.geo,tweet.id_str,tweet.lang,tweet.place,tweet.retweet_count,tweet.retweeted,tweet.source,tweet.text,\r\n tweet._json[\"user\"][\"location\"],tweet._json[\"user\"][\"name\"],tweet._json[\"user\"][\"time_zone\"],\r\n tweet._json[\"user\"][\"utc_offset\"]] for tweet in alltweets]\r\n \r\n import pandas as pd\r\n tweets_df = pd.DataFrame(columns = [\"time\",\"hashtags\",\"user_mentions\",\"favorite_count\",\r\n \"geo\",\"id_str\",\"lang\",\"place\",\"retweet_count\",\"retweeted\",\"source\",\r\n \"text\",\"location\",\"name\",\"time_zone\",\"utc_offset\"])\r\n tweets_df[\"time\"] = pd.Series([str(i[0]) for i in outtweets])\r\n tweets_df[\"hashtags\"] = pd.Series([str(i[1]) for i in outtweets])\r\n tweets_df[\"user_mentions\"] = pd.Series([str(i[2]) for i in outtweets])\r\n tweets_df[\"favorite_count\"] = pd.Series([str(i[3]) for i in outtweets])\r\n tweets_df[\"geo\"] = pd.Series([str(i[4]) for i in outtweets])\r\n tweets_df[\"id_str\"] = pd.Series([str(i[5]) for i in outtweets])\r\n tweets_df[\"lang\"] = pd.Series([str(i[6]) for i in outtweets])\r\n tweets_df[\"place\"] = pd.Series([str(i[7]) for i in outtweets])\r\n tweets_df[\"retweet_count\"] = pd.Series([str(i[8]) for i in outtweets])\r\n tweets_df[\"retweeted\"] = pd.Series([str(i[9]) for i in outtweets])\r\n tweets_df[\"source\"] = pd.Series([str(i[10]) for i in outtweets])\r\n tweets_df[\"text\"] = pd.Series([str(i[11]) for i in outtweets])\r\n tweets_df[\"location\"] = pd.Series([str(i[12]) for i in outtweets])\r\n tweets_df[\"name\"] = pd.Series([str(i[13]) for i in outtweets])\r\n tweets_df[\"time_zone\"] = pd.Series([str(i[14]) for i in outtweets])\r\n tweets_df[\"utc_offset\"] = pd.Series([str(i[15]) for i in outtweets])\r\n tweets_df.to_csv(screen_name+\"_tweets.csv\")\r\n return tweets_df\r\n\r\nnews= get_all_tweets(\"WIONews\")\r\n\r\n\r\nfrom textblob import TextBlob\r\n\r\nnews['polarity'] = news.apply(lambda x: TextBlob(x['text']).sentiment.polarity, axis = 1)\r\nnews['subjectivity'] = news.apply(lambda x: 
TextBlob(x['text']).sentiment.subjectivity, axis = 1)\r\n\r\nprint(news[['text','polarity','subjectivity']])\r\n\r\nnews.loc[news['polarity'] < 0, 'sentiments'] = 'Negative' \r\nnews.loc[news['polarity'] == 0, 'sentiments'] = 'Neutral' \r\nnews.loc[news['polarity'] > 0, 'sentiments'] = 'Positive' \r\n\r\n\r\n\r\nprint(news[['polarity','sentiments']])\r\n\r\n\r\nnews['sentiments'].value_counts().plot(kind='pie')\r\n", "sub_path": "Twitter _assignment.py", "file_name": "Twitter _assignment.py", "file_ext": "py", "file_size_in_byte": 3455, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 18, "usage_type": "call"}, {"api_name": "Tweet_cred.consumer_key", "line_number": 18, "usage_type": "attribute"}, {"api_name": "Tweet_cred.consumer_secret", "line_number": 18, "usage_type": "attribute"}, {"api_name": "Tweet_cred.access_key", "line_number": 19, "usage_type": "attribute"}, {"api_name": "Tweet_cred.access_secret", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tweepy.API", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 57, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 66, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "27592572", "text": "import xml.etree.ElementTree as ElementTree\nimport eviltransform as trans\n\n\ndef transform_gpx(input_path, output_path=\"output.gpx\"):\n ElementTree.register_namespace('', \"http://www.topografix.com/GPX/1/1\")\n ElementTree.register_namespace('xsi', \"http://www.w3.org/2001/XMLSchema-instance\")\n tree = ElementTree.parse(input_path)\n root = tree.getroot()\n root.attrib['creator'] = \"i-am-running https://github.com/yangl1996/i-am-running\"\n\n for point in root.iter('{http://www.topografix.com/GPX/1/1}trkpt'):\n location = (float(point.attrib['lat']), float(point.attrib['lon']))\n transformed = trans.gcj2wgs_exact(gcjLat=location[0], gcjLng=location[1])\n point.attrib['lat'] = str(transformed[0])\n point.attrib['lon'] = str(transformed[1])\n tree.write(output_path)\n", "sub_path": "iamrunning/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 808, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", 
"api": [{"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 6, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 6, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.register_namespace", "line_number": 7, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 7, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 8, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 8, "usage_type": "name"}, {"api_name": "eviltransform.gcj2wgs_exact", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "490481276", "text": "import sys\r\nimport requests\r\nfrom PyQt5.QtWidgets import QWidget,QApplication,QLabel,QPushButton,QVBoxLayout,QHBoxLayout,QLineEdit,QTextEdit\r\n\r\nclass Pencere(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.init_ui()\r\n\r\n def init_ui(self):\r\n self.button = QPushButton(\"Çevir\")\r\n self.text1 = QLineEdit()\r\n self.text2 = QLineEdit()\r\n self.money = QLineEdit()\r\n self.result = QTextEdit()\r\n\r\n\r\n v_box = QVBoxLayout()\r\n v_box.addWidget(self.text1)\r\n v_box.addWidget(self.text2)\r\n v_box.addWidget(self.money)\r\n v_box.addWidget(self.button)\r\n v_box.addStretch()\r\n\r\n h_box = QHBoxLayout()\r\n h_box.addLayout(v_box)\r\n h_box.addStretch()\r\n h_box.addWidget(self.result)\r\n\r\n self.setLayout(h_box)\r\n\r\n self.button.clicked.connect(self.click)\r\n self.show()\r\n\r\n def click(self):\r\n self.first_currency = self.text1.text()\r\n self.second_currency = self.text2.text()\r\n self.amount = self.money.text()\r\n self.api_key = \"dc341a246b748823041c14676097334c\"\r\n self.url = \"http://data.fixer.io/api/latest?access_key=\" + self.api_key\r\n self.response = requests.get(self.url)\r\n self.infos = self.response.json()\r\n self.first_value = self.infos[\"rates\"][self.first_currency]\r\n self.second_value = self.infos[\"rates\"][self.second_currency]\r\n print(self.amount,self.first_currency,(self.second_value/self.first_value)*float((self.amount)),self.second_currency)\r\n self.result.setText(str(self.amount) + \" \" + self.first_currency + \" = \" + str(self.second_value/self.first_value * float(self.amount)) + \" \" + self.second_currency)\r\n\r\napp = QApplication(sys.argv)\r\npencere = Pencere()\r\nsys.exit(app.exec_())", "sub_path": "PyQt5/döviz.py", "file_name": "döviz.py", "file_ext": "py", "file_size_in_byte": 1813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 5, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 12, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 13, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 14, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTextEdit", "line_number": 15, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 48, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}]} +{"seq_id": 
"549550594", "text": "import socket\nfrom sunposition import sunpos\nfrom datetime import datetime\nimport os\n\n\n \ndef Main(ip, port, sessionID): \n \n s = socket.socket()\n s.bind((ip,port))\n s.listen(1)\n print(\"listening\")\n c, addr = s.accept()\n print (\"Connection from: \" + str(addr))\n while 1:\n receivedData = c.recv(1024)\n \n if not receivedData:\n break\n \n receivedDataStr = receivedData.decode('utf-8')\n matrixStr, gpsStr = receivedDataStr.split('?')\n \n fileTxt = open(sessionID + \"ARKitWorld_2_ARKitCam.txt\", \"a+\")\n fileTxt.write(matrixStr)\n fileTxt.close()\n \n lat, lon, alt, horAcc, timeStamp = gpsStr.split(\"!\")\n time = datetime.utcnow()\n az, zen, ra, dec, h = sunpos(time,float(lat),float(lon),float(alt))\n \n sunPosStr = str(az) + \"!\" + str(zen)\n fileTxt = open(sessionID + \"Zenith_Azimuth.txt\", \"a+\")\n fileTxt.write(sunPosStr)\n fileTxt.close()\n \n fileTxt = open(sessionID + \"GPS_Coord.txt\", \"a+\")\n fileTxt.write(\"latitude: \" + lat + \"\\nlongitude: \" + lon + \"\\naltitude: \" + alt + \"\\nhorizontal accuracy: \" + horAcc + \"\\ntime stamp: \" + timeStamp)\n fileTxt.close()\n \n print(\"from connected user:\\n\" + receivedDataStr)\n break\n \n s.close()\n print(\"shut down\")\n\nif __name__ == '__main__':\n ipHost = \"172.20.10.2\"\n portHost = 5004\n \n sessionID = input(\"Enter session ID: \")\n imgPath = \"../Sessions/\" + sessionID + \"/\"\n if not os.path.exists(imgPath):\n os.makedirs(imgPath)\n os.makedirs(imgPath + \"iPhone/\")\n Main(ipHost, portHost, imgPath + \"iPhone/\")\n else:\n print(\"Directory with same session ID already exist!\")\n", "sub_path": "tcpServer.py", "file_name": "tcpServer.py", "file_ext": "py", "file_size_in_byte": 1769, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "socket.socket", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "sunposition.sunpos", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 55, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "146067859", "text": "# -*- coding:utf-8 -*-\n\n\"\"\"\n Author: Helixcs\n Site: https://iliangqunru.bitcron.com/\n File: weibo_api.py\n Time: 5/19/18\n\"\"\"\n\nimport requests\nfrom typing import Optional\n\nResponse = Optional[str]\n\n_GET_INDEX = \"https://m.weibo.cn/api/container/getIndex\"\n\n\nclass WeiboApiException(Exception):\n def __init__(self, message):\n self.message = message\n\n\ndef search_by_name(name: str) -> Response:\n \"\"\"get summary info which searched by name,\n this api is like 'https://m.weibo.cn/api/container/getIndex?queryVal=&containerid=100103type%3D3%26q%3D'\n\n >>> from weibo_base import search_by_name\n >>> _response = search_by_name('Helixcs')\n >>> ..._response\n :param name: nick name which you want to search\n :return json string including summary info\n \"\"\"\n _params = {'queryVal': name, 'containerid': '100103type%3D3%26q%3D' + name}\n _response = requests.get(url=_GET_INDEX, params=_params)\n if _response.status_code == 200:\n return _response.json()\n return None\n\n\ndef weibo_getIndex(uid_value: str) -> Response:\n \"\"\"\n get personal summary 
info for the given uid; the uid itself is obtained via 'search_by_name',\n    this api is like 'https://m.weibo.cn/api/container/getIndex?type=uid&value=<uid>'\n\n    >>> from weibo_base import weibo_getIndex\n    >>> _response = weibo_getIndex('1843242321')\n    >>> ..._response\n    :param uid_value:\n    :return:\n    \"\"\"\n    _params = {\"type\": \"uid\", \"value\": uid_value}\n    _response = requests.get(url=_GET_INDEX, params=_params)\n    if _response.status_code == 200:\n        return _response.json()\n    return None\n\n\ndef weibo_tweets(containerid: str, page: int) -> Response:\n    \"\"\"\n    get a person's weibo tweets for the given containerid and page,\n    this api is like 'https://m.weibo.cn/container/getIndex?containerid=<containerid>&page=<page>'\n    >>> from weibo_base import weibo_tweets\n    >>> _response = weibo_tweets(containerid='1076031843242321',page=1)\n    >>> ..._response\n    :param containerid: containerid\n    :param page: page\n    :return:\n    \"\"\"\n    _params = {\"containerid\": containerid, \"page\": page}\n    _response = requests.get(url=_GET_INDEX, params=_params)\n    if _response.status_code == 200:\n        return _response.json()\n    return None\n\n\n# =========== api component ==============\n\n\ndef exist_get_uid(search_by_name_response: str = None, name: str = \"\") -> dict:\n    \"\"\"\n    whether the name exists in the response from the search api; if it exists, return the uid\n    :param search_by_name_response:\n    :param name:\n    :return:\n    \"\"\"\n    if not search_by_name_response or str(search_by_name_response) == '':\n        search_by_name_response = search_by_name(name)\n    # bad request\n    if search_by_name_response.get('ok') != 1:\n        return {\"exist\": False, \"name\": name, \"uid\": None}\n    card_type = [card for card in search_by_name_response.get(\"data\").get(\"cards\") if card.get('card_type') == 11]\n    if len(card_type) < 1:\n        return {\"exist\": False, \"name\": name, \"uid\": None}\n\n    user = card_type[0].get('card_group')[0].get('user')\n    screen_name = user.get('screen_name')\n    if screen_name == name:\n        return {\"exist\": True, \"name\": name, \"uid\": user.get('id')}\n    return {\"exist\": False, \"name\": name, \"uid\": None}\n\n\ndef get_weibo_containerid(weibo_getIndex_response: str = None, uid: str = \"\"):\n    \"\"\"\n    get weibo_containerid\n    :param uid: uid\n    :return: weibo_containerid\n    \"\"\"\n    if weibo_getIndex_response is None or str(weibo_getIndex_response) == '':\n        weibo_getIndex_response = weibo_getIndex(uid)\n    if weibo_getIndex_response.get('ok') != 1:\n        return None\n    tabs = weibo_getIndex_response.get('data').get('tabsInfo').get('tabs')\n    for tab in tabs:\n        if tab.get('tab_type') == 'weibo':\n            return tab.get('containerid')\n    return None\n", "sub_path": "weibo_base/weibo_api.py", "file_name": "weibo_api.py", "file_ext": "py", "file_size_in_byte": 3883, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "typing.Optional", "line_number": 13, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 52, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "360421709", "text": "import paho.mqtt.client as paho\nimport json\nimport time\nimport random\nimport django\ndjango.setup()\n\nbroker=\"192.168.0.11\"\nport= 9001\nsub_topic=\"test\"\n\n \ndef on_message(client, userdata, message):\n    print(\"message received \" ,str(message.payload.decode(\"utf-8\")))\n\n\nclient= paho.Client(\"receive-socks\",transport='websockets') #create client object\n#client= 
mqtt.Client(\"receive-socks\") # no websocket\n\nclient.connect(broker,port) #establish connection\n\n\nclient.loop_start()\nclient.subscribe(sub_topic)\nclient.on_message = on_message #assign function to callback\n\n\n\nclient= paho.Client(\"publish-socks\",transport='websockets') #create client object\n\nclient.connect(broker,port) #establish connection\n\nwhile True:\n rand_int = random.randint(1, 100)\n client.publish(sub_topic,rand_int) #publish\n print('publishing: '+str(rand_int))\n time.sleep(1)\n", "sub_path": "blog/mqtt.py", "file_name": "mqtt.py", "file_ext": "py", "file_size_in_byte": 900, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "paho.mqtt.client.Client", "line_number": 17, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 17, "usage_type": "name"}, {"api_name": "paho.mqtt.client.Client", "line_number": 29, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 29, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "98212891", "text": "import asyncio\n\nimport aiohttp\nimport requests\nimport time\nfrom tqdm import tqdm\n\nfrom KEGG_parser.parsers import parse_ko\n\n\ndef get_from_kegg_flat_file(file_loc, list_of_ids=None, parser=parse_ko):\n record_list = list()\n for entry in open(file_loc).read().split('///')[:-1]:\n record = parser(entry)\n if list_of_ids is not None:\n if record['ENTRY'] in list_of_ids:\n record_list.append(record)\n else:\n record_list.append(record)\n return record_list\n\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\nasync def download_coroutine(session, url, attempts=10, wait=30):\n for _ in range(attempts):\n async with session.get(url) as response:\n if response.status == 200:\n return await response.text()\n elif response.status != 403:\n raise ValueError('Bad HTTP request status %s: %s\\n%s' % (response.status, response.reason, url))\n await asyncio.sleep(wait)\n\n raise ValueError('KEGG has forbidden request after %s attempts for url %s , which returns a response status of %s' % \n (attempts, url, response.status))\n\n\nasync def kegg_download_manager(loop, list_of_ids):\n urls = ['http://rest.kegg.jp/get/%s' % '+'.join(chunk) for chunk in chunks(list(list_of_ids), 10)]\n\n async with aiohttp.ClientSession(loop=loop) as session:\n tasks = [download_coroutine(session, url) for url in urls]\n results = await asyncio.gather(*tasks)\n\n return [raw_record for raw_records in results for raw_record in raw_records.split('///')[:-1]]\n\ndef download_synchronous(url, attempts=10):\n for _ in range(attempts):\n response = requests.get(url)\n\n if response.status_code == 200:\n return response.text\n \n # if none of our attempts have returned OK\n raise ValueError('KEGG has forbidden request after %s attempts for url %s , which returns a response status of %s' % \n (attempts, url, response.status_code))\n\ndef kegg_download_manager_synchronous(list_of_ids, wait=1):\n \"\"\"This is a backup in case the async downloading is forbidden.\"\"\"\n urls = ['http://rest.kegg.jp/get/%s' % '+'.join(chunk) for chunk in chunks(list(list_of_ids), 10)]\n num_urls = len(urls)\n print(f\"Total urls to download: {num_urls}. 
Progress will be shown below.\")\n results = []\n for url in tqdm(urls):\n results.append(download_synchronous(url))\n time.sleep(wait)\n\n return [raw_record for raw_records in results for raw_record in raw_records.split('///')[:-1]]\n\n\n\ndef get_from_kegg_api(loop, list_of_ids, parser):\n try:\n return [parser(raw_record) for raw_record in loop.run_until_complete(kegg_download_manager(loop, list_of_ids))]\n except ValueError:\n print(\"Asynchronous downloading of KEGG records has failed. KEGG parser will try to download data sequentially.\"\n \"This will be slower.\")\n time.sleep(30)\n return [parser(raw_record) for raw_record in kegg_download_manager_synchronous(list_of_ids)]\n\ndef get_kegg_record_dict(list_of_ids, parser, records_file_loc=None, verbose=False):\n if records_file_loc is None:\n loop = asyncio.get_event_loop()\n records = get_from_kegg_api(loop, list_of_ids, parser)\n else:\n records = get_from_kegg_flat_file(records_file_loc, list_of_ids, parser)\n if verbose:\n print(\"%s records acquired\" % len(records))\n return {record['ENTRY']: record for record in records}\n\n\nasync def kegg_link_download_manager(loop, link1, link2):\n async with aiohttp.ClientSession(loop=loop) as session:\n url = 'http://rest.kegg.jp/link/%s/%s' % (link1, link2)\n tasks = [download_coroutine(session, url)]\n results = await asyncio.gather(*tasks)\n link_dict = dict()\n if len(results) != 1:\n raise ValueError('Result had more than one value')\n for link in results[0].strip().split('\\n'):\n obj_1, obj_2 = link.strip().split()\n obj_1 = obj_1.strip().split(':')[1]\n obj_2 = obj_2.strip().split(':')[1]\n link_dict[obj_2] = obj_1\n return link_dict\n\n\ndef get_kegg_link_from_api(link1, link2):\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(kegg_link_download_manager(loop, link1, link2))\n", "sub_path": "KEGG_parser/downloader.py", "file_name": "downloader.py", "file_ext": "py", "file_size_in_byte": 4367, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "KEGG_parser.parsers.parse_ko", "line_number": 11, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 45, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 53, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 68, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 70, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 87, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 97, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 100, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "616041640", "text": "from argparse import Namespace\n\nimport logging\n\nfrom ..utils._groups import delete_group, get_groups\nfrom ..utils._profile import extract_profile\nfrom ..utils.scope._acl import get_acls, delete_acl\nfrom ..utils.scope._delete import delete_scope\nfrom ..utils.scope._extract import extract_scopes\n\nlogger = logging.getLogger(__name__)\n\n\ndef delete_scope_cli(args: Namespace):\n \"\"\"Updates the secret scope configuration of the databricks instance defined in the current profile\n\n :param Namespace args: The arguments from the cli\n 
:return:\n \"\"\"\n\n # Get the base profile\n profile, base_config = extract_profile(args)\n\n # Get the workspace groups\n groups = get_groups(profile)\n\n # Get the existing scopes\n scopes = extract_scopes(profile)\n\n # Check scope name\n scope_name = args.scope_name\n scope_exists = scope_name in scopes\n\n # Construct the access groups\n accesses = ['read', 'write', 'manage']\n access_groups = {\n f'scope-{scope_name}-{access}': access.upper()\n for access in accesses\n }\n\n # Filter the existing groups\n existing_groups = [group for group in access_groups if group in groups]\n\n # Get the acls if the scope exists\n if scope_exists:\n # Get the acls for the scope\n acls = get_acls(scope_name, profile)\n else:\n acls = {}\n\n # Set deletions\n to_delete = {\n 'scope': scope_name,\n 'groups': existing_groups,\n 'acls': acls\n }\n\n # Verify deletion parameters\n if (not args.a and not args.s) or not scope_exists:\n to_delete.pop('scope')\n if (not args.a and not args.g) or not existing_groups:\n to_delete.pop('groups')\n if (not args.a and not args.c) or not acls:\n to_delete.pop('acls')\n\n # Set the deletion warning\n deletion_warning = ''\n if 'scope' in to_delete:\n deletion_warning += '\\nScope:'\n deletion_warning += f'\\n\\t{to_delete[\"scope\"]}'\n if 'groups' in to_delete:\n deletion_warning += '\\nGroups:'\n for group in to_delete['groups']:\n deletion_warning += f'\\n\\t{group}'\n if 'acls' in to_delete:\n deletion_warning += '\\nAcls:'\n for acl, permission in to_delete['acls'].items():\n deletion_warning += f'\\n\\t{(permission+\":\").ljust(8)}{acl}'\n\n deletion_warning = 'The following resources will be deleted:' + deletion_warning\n\n # Provide the debug output\n if args.d:\n print(deletion_warning)\n\n # Check for confirmation\n elif to_delete and (args.q or input(deletion_warning + '\\n(Y/N):').upper() == 'Y'):\n for principal in to_delete.get('acls', []):\n # Remove the existing acl\n delete_acl(principal, scope_name, profile)\n for group in to_delete.get('groups', []):\n # Remove the existing group\n delete_group(group, profile)\n if 'scope' in to_delete:\n # Delete the scope\n delete_scope(scope_name, profile)\n", "sub_path": "dbricks_setup/scope/_delete.py", "file_name": "_delete.py", "file_ext": "py", "file_size_in_byte": 2960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 14, "usage_type": "name"}, {"api_name": "utils._profile.extract_profile", "line_number": 22, "usage_type": "call"}, {"api_name": "utils._groups.get_groups", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.scope._extract.extract_scopes", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.scope._acl.get_acls", "line_number": 47, "usage_type": "call"}, {"api_name": "utils.scope._acl.delete_acl", "line_number": 90, "usage_type": "call"}, {"api_name": "utils._groups.delete_group", "line_number": 93, "usage_type": "call"}, {"api_name": "utils.scope._delete.delete_scope", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "364683276", "text": "import cv2\r\n\r\nclass FacialRecognition:\r\n def __init__(self):\r\n self.faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\r\n self.video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)\r\n self.faces = []\r\n\r\n def size(self):\r\n return (\r\n int(self.video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),\r\n 
int(self.video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n        )\r\n\r\n    def release(self):\r\n        self.video_capture.release()\r\n        cv2.destroyAllWindows()\r\n\r\n    def getFaces(self):\r\n        _, self.frame = self.video_capture.read()\r\n        self.faces = self.faceCascade.detectMultiScale(\r\n            cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY),\r\n            scaleFactor=1.1,\r\n            minNeighbors=5,\r\n            minSize=(30, 30),\r\n            flags=cv2.CASCADE_SCALE_IMAGE\r\n        )\r\n        return self.faces\r\n\r\n    def showImage(self):\r\n        fw, fh = self.size() #frame width and height\r\n        cv2.circle(self.frame, (int(fw/2), int(fh/2)), 2, (0, 0, 255), 1)\r\n        for (x, y, w, h) in self.faces:\r\n            cv2.rectangle(self.frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n            cv2.circle(self.frame, (int(x+w/2), int(y+h/2)), 4, (255, 0, 0), 2)\r\n        \r\n        cv2.imshow('Video', self.frame)", "sub_path": "FacialRecognition.py", "file_name": "FacialRecognition.py", "file_ext": "py", "file_size_in_byte": 1164, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.CAP_DSHOW", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.destroyAllWindows", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.CASCADE_SCALE_IMAGE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "472569609", "text": "from __future__ import print_function\nimport numpy as np\nimport cv2 as cv\nfrom math import sqrt\nimport sys\nimport scipy.io  # needed by writeToFile_flow below\n\n\n\nshow_hsv = False # global variable for drawing optical flow\n\n\n# Drawing optical flow\n# The next 3 functions will be used in the visualize_flow function\n\ndef draw_flow(img, flow, step=6):\n    h, w = img.shape[:2]\n    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)\n    fx, fy = flow[y,x].T\n    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)\n    lines = np.int32(lines + 0.5)\n    vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)\n    cv.polylines(vis, lines, 0, (0, 255, 0))\n    for (x1, y1), (_x2, _y2) in lines:\n        cv.circle(vis, (x1, y1), 1, (0, 255, 0), -1)\n    return vis\n\n\ndef draw_hsv(flow):\n    h, w = flow.shape[:2]\n    fx, fy = flow[:,:,0], flow[:,:,1]\n    ang = np.arctan2(fy, fx) + np.pi\n    v = np.sqrt(fx*fx+fy*fy)\n    hsv = np.zeros((h, w, 3), np.uint8)\n    hsv[...,0] = ang*(180/np.pi/2)\n    hsv[...,1] = 255\n    hsv[...,2] = np.minimum(v*4, 255)\n    bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n    return bgr\n\n\ndef warp_flow(img, flow):\n    h, w = flow.shape[:2]\n    flow = -flow\n    flow[:,:,0] += np.arange(w)\n    flow[:,:,1] += np.arange(h)[:,np.newaxis]\n    res = cv.remap(img, flow, None, cv.INTER_LINEAR)\n    return res\n\n\n# Get video/camera images\n# video_filename: the file name of the video that you want to use as an input for optical flow\n# If you want to use your camera, the video_filename should be 0, 1 or 2\n
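\n# A hedged sketch of draw_hsv's colour coding (demo_flow is an illustrative name, not part of the original file): flow direction becomes hue and magnitude becomes brightness, so a uniform slip renders as one solid colour.\n#   demo_flow = np.zeros((120, 160, 2), np.float32)\n#   demo_flow[..., 0] = 3.0  # uniform 3 px/frame motion to the right\n#   cv.imshow('hsv demo', draw_hsv(demo_flow))\n\ndef 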
get_cap(video_filename):\n cap = cv.VideoCapture(video_filename)\n global show_hsv\n show_hsv = False\n return cap\n\n\n# BGR2GRAY (optical flow is working on grayscale images)\n\ndef get_grayImage(cap):\n ret, img = cap.read()\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n return gray\n\n\ndef get_RGBImage(cap):\n ret, img = cap.read()\n return img\n \n\n# Visualize optical flow\n\ndef visualize_flow(gray, flow):\n cv.imshow('flow', draw_flow(gray, flow))\n global show_hsv\n if show_hsv:\n cv.imshow('flow HSV', draw_hsv(flow))\n ch = cv.waitKey(5)\n if ch == 27:\n return False\n if ch == ord('1'):\n show_hsv = not show_hsv\n print('HSV flow visualization is', ['off', 'on'][show_hsv])\n return True\n \n \n# Write the flow matrix to a mat file \n# matrix_name is the variable name to write\n# out_filename is the name of the .mat file\n\ndef writeToFile_flow(matrix_name, out_filename):\n matfile = out_filename\n scipy.io.savemat(matfile, mdict={'out': matrix_name}, oned_as='row')\n matdata = scipy.io.loadmat(matfile)\n assert np.all(matrix_name == matdata['out'])\n \n \ndef flow_mean(flow):\n mean_x = np.mean(flow[:,:,0])\n mean_y = np.mean(flow[:,:,1])\n return mean_x, mean_y\n\n\ndef isSlip(mean_x, mean_y, thresh_x, thresh_y):\n if(mean_x > thresh_x or mean_x < -thresh_x):\n print(\"slip in x\") \n if(mean_y > thresh_y or mean_y < -thresh_y):\n print(\"slip in y\")\n\n# Farneback optical flow calculation\n# This function handles the video, calculates the optical flow and the normalized values of dx and dy. You can add here standardization.\n\n# video filename: video input; use 0,1 or 2 if you want to use your own camera\n# frame_num: number of frames of the whole video\n# width: image width\n# height: image height\n\n# Next parameters are for the Farneback optical flow. 
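These are passed straight through to cv.calcOpticalFlowFarneback, whose actual signature begins (prev, next, flow, pyr_scale, levels, ...), so the parameter names below are shifted by one position: the value called pyr_scale here is really the optional initial flow, levels is really pyr_scale, and so on; the defaults still reproduce the recommended values.\n# A hedged call sketch (the webcam index and frame size are assumptions, not from the original file):\n#   optflow_main(0, width=640, height=480)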
\n# Recommended usage: prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0\n\n# Returns the 3D optical flow matrix (every row is the optical flow of one frame, with x and y values) once the commented-out allFlow bookkeeping is re-enabled; as written, the function only visualizes the flow and reports slip.\n\ndef optflow_main(video_filename, width, height, pyr_scale = None, levels = 0.5, winsize = 3, iterations = 15, poly_n = 3, poly_sigma = 5, flags = 1.2, flow_p = 0):\n    cap = get_cap(video_filename)\n    prevgray = get_grayImage(cap)\n    \n\n    #allFlow = np.ndarray(shape = (frame_num, width*height, 2))\n\n\n    while(True):\n        rgb_cap = get_RGBImage(cap)\n        \n        gray = get_grayImage(cap)\n        \n        flow = cv.calcOpticalFlowFarneback(prevgray, gray, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags, flow_p)\n\n        prevgray = gray\n\n        optflow_visualized = visualize_flow(gray, flow)\n\n        if not optflow_visualized:\n            break\n        \n        #print(flow.shape)\n        flowMean_x, flowMean_y = flow_mean(flow)\n        isSlip(flowMean_x, flowMean_y, 0, 0)  # isSlip only prints; it has no return value\n        \n        font = cv.FONT_HERSHEY_SIMPLEX\n        cv.putText(gray, \"mean flow x: %.3f  y: %.3f\" % (flowMean_x, flowMean_y), (10,450), font, 1, (0, 255, 0), 2, cv.LINE_AA)\n\n    #return allFlow # 3D optical flow matrix (every row is the optical flow of one frame, which contains x and y values)\n", "sub_path": "optical_flow_utils.py", "file_name": "optical_flow_utils.py", "file_ext": "py", "file_size_in_byte": 4629, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.mgrid", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2BGR", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.polylines", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.minimum", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.COLOR_HSV2BGR", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cv2.remap", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 66, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 104, 
"usage_type": "call"}, {"api_name": "cv2.calcOpticalFlowFarneback", "line_number": 140, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 153, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 154, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 154, "usage_type": "attribute"}]} +{"seq_id": "399738265", "text": "# This is the Twisted Get Poetry Now! client, version 2.0.\n\n# NOTE: This should not be used as the basis for production code.\nimport sys\n#sys.path.append('C:\\\\Users\\\\PC\\\\Source\\\\Repos\\\\')\n#sys.path.append('C:\\\\Users\\\\PC\\\\Source\\\\Repos\\\\twisted_intro')\n\nimport datetime, argparse\nfrom twisted_intro.arg_parsing import parse_args\n\nfrom twisted.internet.protocol import Protocol, ClientFactory\n\n\nclass PoetryProtocol(Protocol):\n\n poem = ''\n task_num = 0\n timeoutSet = None\n timeout = None\n timed_out = False\n\n def successOnTime(self):\n if not self.timed_out and self.timeoutSet is not None:\n self.timeoutSet.cancel()\n\n def dataReceived(self, data):\n self.poem += data.decode()\n msg = 'Task %d: got %d bytes of poetry from %s'\n print(msg % (self.task_num, len(data), self.transport.getPeer()))\n\n def connectionLost(self, reason):\n self.poemReceived(self.poem)\n import traceback\n traceback.print_stack()\n\n def poemReceived(self, poem):\n self.factory.poem_finished(self.task_num, poem)\n self.successOnTime()\n\n def connectionMade(self):\n if self.timeout is not None:\n from twisted.internet import reactor\n self.timeoutSet = reactor.callLater(self.timeout, self.onTimeoutExpired)\n print('Timeout {} seconds set for connection with: {}'.format(self.timeout, self.transport.getPeer()))\n\n def onTimeoutExpired(self):\n print('Connection with {} timed out!'.format(self.transport.getPeer()), '\\nClosing connection')\n self.transport.loseConnection()\n self.timed_out = True\n print('Connection with {} closed.'.format(self.transport.getPeer()))\n\n\nclass PoetryClientFactory(ClientFactory):\n\n task_num = 1\n\n # tell base class what proto to build\n # class variable\n # overrides:\n # protocol = None\n # in base Factory class\n protocol = PoetryProtocol\n\n def __init__(self, poetry_count, timeouts):\n self.poetry_count = poetry_count\n self.timeouts = timeouts\n self.poems = {} # task num -> poem\n\n def buildProtocol(self, address):\n proto = ClientFactory.buildProtocol(self, address)\n proto.task_num = self.task_num\n proto.timeout = self.timeouts.pop(0)\n self.task_num += 1 # if called the first time reads task_num from class variable which is set to 1 for each new Factory\n return proto\n\n def poem_finished(self, task_num=None, poem=None):\n if task_num is not None:\n self.poems[task_num] = poem\n\n self.poetry_count -= 1\n\n if self.poetry_count == 0:\n self.report()\n from twisted.internet import reactor\n reactor.stop()\n\n def report(self):\n for i in self.poems:\n print ('Task {}: {} bytes of poetry'.format(i, len(self.poems[i])))\n\n def clientConnectionFailed(self, connector, reason):\n print('Failed to connect to:', connector.getDestination())\n self.poem_finished()\n\n\ndef poetry_main():\n addresses = parse_args()\n timeouts = [100,15,20]\n start = datetime.datetime.now()\n\n factory = PoetryClientFactory(len(addresses), timeouts)\n\n from twisted.internet import reactor\n\n for address in addresses:\n host, port = address\n reactor.connectTCP(host, port, factory)\n\n reactor.run()\n\n elapsed = datetime.datetime.now() - start\n\n print('Got {} poems in 
{}'.format(len(addresses), elapsed))\n\n\nif __name__ == '__main__':\n poetry_main()\n", "sub_path": "twisted_client_2/get_poetry.py", "file_name": "get_poetry.py", "file_ext": "py", "file_size_in_byte": 3517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "twisted.internet.protocol.Protocol", "line_number": 14, "usage_type": "name"}, {"api_name": "traceback.print_stack", "line_number": 34, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.callLater", "line_number": 43, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 43, "usage_type": "name"}, {"api_name": "twisted.internet.protocol.ClientFactory", "line_number": 53, "usage_type": "name"}, {"api_name": "twisted.internet.protocol.ClientFactory.buildProtocol", "line_number": 70, "usage_type": "call"}, {"api_name": "twisted.internet.protocol.ClientFactory", "line_number": 70, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.stop", "line_number": 85, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 85, "usage_type": "name"}, {"api_name": "twisted_intro.arg_parsing.parse_args", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 99, "usage_type": "attribute"}, {"api_name": "{'reactor': 'twisted.internet.reactor'}", "line_number": 101, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.connectTCP", "line_number": 107, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 107, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.run", "line_number": 109, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 109, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 111, "usage_type": "attribute"}]} +{"seq_id": "643805279", "text": "# -*- coding: utf-8 -*-\n# user = www\n\nimport unittest\nimport json\nfrom common.log_http import MyLog\nfrom common import contants\nfrom common.request import Request\nfrom ddt import ddt, data\nfrom common.do_excel import DoExcel\nfrom common.basic_data import DoRegex, Context\nfrom common.mysql_util import MysqlUtil\n\nww = DoExcel(contants.cases_path)\ncases = ww.get_cases('add')\n\n@ddt\nclass TestAdd(unittest.TestCase):\n    def setUp(self):\n        print(\"Test started\")\n        self.mysql = MysqlUtil()\n    def tearDown(self):\n        self.mysql.close()\n        print(\"Test finished\")\n        print(\"--------------------------------------\")\n\n    @data(*cases)\n    def test_add(self, case): # exactly one case must be passed in\n        my_logger = MyLog()\n        data = DoRegex.replace(case.data) # first read the data from Excel, then apply the regex replacement\n        data = json.loads(data) # then json.loads it\n        if hasattr(Context, 'cookies'): # the first login has no cookies, so check first\n            cookies = getattr(Context, 'cookies')\n        else:\n            cookies = None\n        res = Request(method=case.method, url=case.url, data=data, cookies=cookies)\n        # check whether cookies were returned\n        if res.get_cookies():\n            setattr(Context, 'cookies', res.get_cookies())\n        my_logger.debug(res.get_json())\n        try:\n            self.assertEqual(str(case.expected), res.get_json()['code'])\n        except AssertionError as e:\n            my_logger.error(\"Assertion failed\")\n            raise e\n\nif __name__ == '__main__':\n    unittest.main()\n", "sub_path": "testcases/test_audit.py", "file_name": "test_audit.py", "file_ext": "py", "file_size_in_byte": 1540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "18", "api": [{"api_name": "common.do_excel.DoExcel", "line_number": 14, "usage_type": "call"}, {"api_name": "common.contants.cases_path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "common.contants", "line_number": 14, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 18, "usage_type": "attribute"}, {"api_name": "common.mysql_util.MysqlUtil", "line_number": 21, "usage_type": "call"}, {"api_name": "common.log_http.MyLog", "line_number": 29, "usage_type": "call"}, {"api_name": "ddt.data", "line_number": 30, "usage_type": "name"}, {"api_name": "common.basic_data.DoRegex.replace", "line_number": 30, "usage_type": "call"}, {"api_name": "common.basic_data.DoRegex", "line_number": 30, "usage_type": "name"}, {"api_name": "ddt.data", "line_number": 31, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "common.basic_data.Context", "line_number": 32, "usage_type": "argument"}, {"api_name": "common.basic_data.Context", "line_number": 33, "usage_type": "argument"}, {"api_name": "common.request.Request", "line_number": 36, "usage_type": "call"}, {"api_name": "ddt.data", "line_number": 36, "usage_type": "name"}, {"api_name": "common.basic_data.Context", "line_number": 39, "usage_type": "argument"}, {"api_name": "ddt.data", "line_number": 27, "usage_type": "call"}, {"api_name": "ddt.ddt", "line_number": 17, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "288738400", "text": "import requests, time\n\nt = time.localtime()\nyear = str(t.tm_year)\nmonth = str(\"%02d\"%t.tm_mon)\nday = str(\"%02d\"%t.tm_mday)\nhour = str(\"%02d\"%t.tm_hour)\nsec = t.tm_sec\nm = t.tm_min\n\nfilename = input(\"which property? ;\")\nf = open(filename+'.txt','r')\ns = eval(f.read())\nf.close()\n\nhost = s.get('host')\naccid = s.get('accid')\npermoney = \"5000\"\ndevid = s.get('devid')\nprjid = s.get('prjid')\nlogincode = s.get('logincode')\nupmoney = s.get('upmoney')\ndevtype = s.get('devtype')\ngroupid = s.get('groupid')\n\num = input(\"how much woulid you like to pay for this shower?\")\nif um != '':\n try:\n upmoney = um\n except Exception:\n pass\n finally:\n pass\n \nheader = {\n\"Content-Length\":\"0\",\n\"Host\":\"118.31.18.116\",\n\"Connection\":\"Keep-Alive\",\n\"Accept-Encoding\":\"gzip\",\n\"User-Agent\":\"okhttp/3.4.2\",\n}\n \ndef jiezhang(ConsumDT):\n url_consum = \"http://%s/appI/api/savexf?AccID=%s&ConsumeDT=%s&DevID=%s&GroupID=%s&PerMoney=%s&PrjID=%s&UpMoney=%s&devType=%s&loginCode=%s%2C508487&phoneSystem=ios&version=1.0.1\"%(host, accid, ConsumDT, devid, groupid, permoney, prjid, upmoney, devtype, logincode)\n print(url_consum)\n response = requests.post(url_consum,headers=header).content\n result = str(response,\"utf8\")\n print(result)\n result = eval(result)\n if result.get(\"error_code\")==\"0\":\n print(result.get(\"error_code\"))\n print(\"结账成功\")\n return 1\n else:\n print(\"failed to pay....retrying.........\")\n return 0\n sec_bak = sec\n\nwhile True:\n second = \"%02d\"%sec\n minute = str(\"%02d\"%m)\n ConsumDT = year+month+day+hour+minute+second\n if jiezhang(ConsumDT):\n break\n sec -=1\n if sec < 0:\n sec = 60\n m -= 1\n if sec == sec_bak:\n break", "sub_path": "based_on_BLE_portal_test/app/src/main/1.py", "file_name": "1.py", "file_ext": "py", "file_size_in_byte": 1741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "time.localtime", "line_number": 3, "usage_type": 
"call"}, {"api_name": "requests.post", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "326406948", "text": "import os\nimport time\nfrom typing import Optional\nimport numpy as np\nimport torch\n\nfrom ..utils import *\nfrom ..models import autoencode\nfrom ...utils import AverageMeter, TensorboardWriter\n\nclass Trainer:\n def __init__(\n self,\n config,\n train_loader: torch.utils.data.DataLoader,\n model: torch.nn.Module,\n optimizers: torch.optim.Optimizer,\n print_freq: int = 50,\n tensorboard: bool = False,\n log_dir: Optional[str] = None\n ) -> None:\n self.config = config\n self.print_freq = print_freq\n\n self.train_loader = train_loader\n self.model = model\n self.optimizers = optimizers\n\n self.step = 0\n self.lr = self.config.multi_lr\n self.create_time = misc.get_datetime()\n self.epoch_len = len(self.train_loader)\n\n self.writer = TensorboardWriter(log_dir, tensorboard)\n self.track_loss = AverageMeter(tag='loss', writer=self.writer)\n\n def run_train(self) -> None:\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n start = time.time()\n\n for epoch in range(1, self.config.multi_epochs + 1):\n for i, batch in enumerate(self.train_loader):\n data_time.update(time.time() - start)\n\n self.optimizers.zero_grad()\n self.model.train()\n\n self.train_step(batch)\n\n batch_time.update(time.time() - start)\n start = time.time()\n\n self.update_learning_rate()\n self.save_model(epoch, i + 1)\n\n step = (epoch - 1) * self.epoch_len + i\n self.writer.set_step(step=step, mode='train')\n\n if i % self.print_freq == 0:\n print(\n 'Epoch: [{0}][{1}/{2}]\\t'\n 'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(\n epoch, i, len(self.train_loader),\n batch_time = batch_time,\n data_time = data_time,\n loss = self.track_loss\n )\n )\n\n def update_learning_rate(self) -> None:\n new_lr = (\n self.config.multi_lr\n * self.config.multi_lr_mult\n ** (self.step // self.config.multi_lr_mult_every)\n )\n if new_lr != self.lr:\n for optimizer in self.optimizers.optimizer_ls:\n update_learning_rate(optimizer=optimizer, new_lr=new_lr)\n self.lr = new_lr\n\n def save_model(self, i_epoch: int, i_iter: int) -> None:\n step = (i_epoch - 1) * self.epoch_len + i_iter\n if step > 0 and step % self.config.model_save_every == 0:\n filename = (\n f\"{self.config.multi_model_basename}_\"\n f\"{self.create_time}_\"\n f\"{step:06d}.p\"\n )\n save_path = os.path.join(self.config.model_path, filename)\n\n torch.save({\n \"optimizer\": self.optimizers,\n \"model\": self.model\n }, save_path)\n\n print(f\"Saved to: {save_path}\")\n\n def train_step(self, batch) -> None:\n raw_autoencode_loss, autoencode_logprobs, target_lengths = autoencode.get_autoencode_loss(\n batch = batch,\n encoder = self.model.encoder,\n decoder = self.model.decoder,\n max_length = self.config.multi_max_length + 2 # and \n )\n\n loss = raw_autoencode_loss * self.config.autoencode_loss_multiplier\n loss.backward()\n\n self.track_loss.update(loss.item())\n\n clip_gradients(\n model = self.model,\n grad_clip = self.config.multi_grad_clip\n )\n self.optimizers.step()\n", "sub_path": "src/multi/runners/trainer.py", "file_name": "trainer.py", "file_ext": "py", "file_size_in_byte": 3953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "torch.utils", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 16, 
"usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 17, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "utils.TensorboardWriter", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.AverageMeter", "line_number": 35, "usage_type": "call"}, {"api_name": "utils.AverageMeter", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.AverageMeter", "line_number": 39, "usage_type": "call"}, {"api_name": "time.time", "line_number": 41, "usage_type": "call"}, {"api_name": "time.time", "line_number": 45, "usage_type": "call"}, {"api_name": "time.time", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 95, "usage_type": "call"}, {"api_name": "models.autoencode.get_autoencode_loss", "line_number": 103, "usage_type": "call"}, {"api_name": "models.autoencode", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "401514367", "text": "import asyncio\nimport discord\nfrom discord.ext import commands\n\n\nclass Moderation:\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name=\"kick\")\n @commands.has_permissions(kick_members=True)\n async def _kick_command(self, ctx, member: discord.Member):\n \"\"\"\n Kick a member from the server.\n \"\"\"\n await member.kick(reason=f\"Kick requested by {ctx.author}.\")\n\n e = discord.Embed(\n title=f\"Kicked: {member.name}\",\n description=f\"{member.name} was kicked from the server.\"\n )\n e.set_thumbnail(\n url=member.avatar_url_as(static_format=\"png\", size=64)\n )\n\n await ctx.send(embed=e)\n\n @commands.command(name=\"ban\")\n @commands.has_permissions(ban_members=True)\n async def _ban_command(self, ctx, member: discord.Member, time: int = False):\n \"\"\"\n Ban a member from the server.\n You can use the time argument to specify then the member should be unbanned in seconds.\n \"\"\"\n if time <= 0:\n raise commands.BadArgument(\"An invalid argument was passed. 
The time argument can't be negative or 0.\")\n\n        await member.ban(reason=f\"Ban requested by {ctx.author}.\")\n\n        e = discord.Embed(\n            title=f\"Banned: {member.name}\",\n            description=f\"{member.name} was banned from the server.\",\n            color=self.bot.color\n        )\n        e.set_thumbnail(\n            url=member.avatar_url_as(static_format=\"png\", size=64)\n        )\n\n        await ctx.send(embed=e)\n\n        if time:\n            await asyncio.sleep(time)\n            await member.unban(reason=\"Ban timer elapsed.\")\n            try:\n                e = discord.Embed(\n                    title=f\"Unbanned: {member.name}\",\n                    description=f\"You have been unbanned in {ctx.guild.name}.\",\n                    color=self.bot.color\n                )\n                e.set_thumbnail(\n                    url=ctx.guild.icon_url\n                )\n                await member.send(embed=e)\n            except discord.Forbidden:\n                pass\n\n    @commands.command(name=\"mute\")\n    @commands.has_permissions(manage_channels=True)\n    async def _mute_command(self, ctx, member: discord.Member, time: int = False):\n        \"\"\"\n        Mute a member in all text/voice channels.\n        You can use the time argument to specify when the member should be unmuted in seconds.\n        \"\"\"\n        success_channels = []\n        for channel in ctx.guild.channels:\n            try:\n                if isinstance(channel, discord.channel.TextChannel):\n                    await channel.set_permissions(\n                        member,\n                        overwrite=discord.PermissionOverwrite(\n                            send_messages=False\n                        )\n                    )\n                    success_channels.append(channel.name)\n                elif isinstance(channel, discord.channel.VoiceChannel):\n                    await channel.set_permissions(\n                        member,\n                        overwrite=discord.PermissionOverwrite(\n                            speak=False\n                        )\n                    )\n                    success_channels.append(channel.name)\n            except discord.Forbidden:\n                pass\n\n        e = discord.Embed(\n            title=f\"Muted: {member.name}\",\n            description=f\"I muted {member.name} in the \\\n{len(success_channels)} channel(s) that I have permissions to mute in!\",\n            color=self.bot.color\n        )\n        e.set_thumbnail(\n            url=member.avatar_url_as(static_format=\"png\", size=64)\n        )\n\n        await ctx.send(embed=e)\n\n        if time:\n            await asyncio.sleep(time)\n            for channel in ctx.guild.channels:\n                try:\n                    if isinstance(channel, discord.channel.TextChannel):\n                        await channel.set_permissions(\n                            member,\n                            overwrite=None\n                        )\n                    elif isinstance(channel, discord.channel.VoiceChannel):\n                        await channel.set_permissions(\n                            member,\n                            overwrite=None\n                        )\n                except discord.Forbidden:\n                    pass\n\n            e = discord.Embed(\n                title=f\"Unmuted: {member.name}\",\n                description=f\"I unmuted {member.name}.\",\n                color=self.bot.color\n            )\n            e.set_thumbnail(\n                url=member.avatar_url_as(static_format=\"png\", size=64)\n            )\n\n            await ctx.send(member.mention, embed=e)\n\n    @commands.command(name=\"unmute\")\n    @commands.has_permissions(manage_channels=True)\n    async def _unmute_command(self, ctx, member: discord.Member):\n        \"\"\"\n        Unmute a member.\n        \"\"\"\n        for channel in ctx.guild.channels:\n            try:\n                if isinstance(channel, discord.channel.TextChannel):\n                    await channel.set_permissions(\n                        member,\n                        overwrite=None\n                    )\n                elif isinstance(channel, discord.channel.VoiceChannel):\n                    await channel.set_permissions(\n                        member,\n                        overwrite=None\n                    )\n            except discord.Forbidden:\n                pass\n\n        e = discord.Embed(\n            title=f\"Unmuted: {member.name}\",\n            description=f\"I unmuted {member.name}.\",\n            color=self.bot.color\n        )\n        e.set_thumbnail(\n            url=member.avatar_url_as(static_format=\"png\", size=64)\n        )\n\n        await ctx.send(embed=e)\n\n\ndef setup(bot):\n    bot.add_cog(Moderation(bot))\n", "sub_path": "Zane/cogs/moderation.py", "file_name": "moderation.py", "file_ext": "py", "file_size_in_byte": 5834, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": 
"discord.Member", "line_number": 13, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 19, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 11, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 11, "usage_type": "name"}, {"api_name": "discord.ext.commands.has_permissions", "line_number": 12, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 12, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 31, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 37, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 37, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 41, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 53, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 56, "usage_type": "call"}, {"api_name": "discord.Forbidden", "line_number": 65, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 29, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 29, "usage_type": "name"}, {"api_name": "discord.ext.commands.has_permissions", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 30, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 70, "usage_type": "attribute"}, {"api_name": "discord.channel", "line_number": 78, "usage_type": "attribute"}, {"api_name": "discord.PermissionOverwrite", "line_number": 81, "usage_type": "call"}, {"api_name": "discord.channel", "line_number": 86, "usage_type": "attribute"}, {"api_name": "discord.PermissionOverwrite", "line_number": 89, "usage_type": "call"}, {"api_name": "discord.Forbidden", "line_number": 94, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 97, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 110, "usage_type": "call"}, {"api_name": "discord.channel", "line_number": 113, "usage_type": "attribute"}, {"api_name": "discord.channel", "line_number": 118, "usage_type": "attribute"}, {"api_name": "discord.Forbidden", "line_number": 123, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 126, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 68, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 68, "usage_type": "name"}, {"api_name": "discord.ext.commands.has_permissions", "line_number": 69, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 69, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 139, "usage_type": "attribute"}, {"api_name": "discord.channel", "line_number": 145, "usage_type": "attribute"}, {"api_name": "discord.channel", "line_number": 150, "usage_type": "attribute"}, {"api_name": "discord.Forbidden", "line_number": 155, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 158, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 137, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 137, "usage_type": "name"}, {"api_name": "discord.ext.commands.has_permissions", "line_number": 138, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "49316307", "text": "#! 
/usr/bin/env python3\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport data_helpers\nimport data_helpers_nsmc\n# from text_cnn import TextCNN\nfrom tensorflow.contrib import learn\nimport csv\nimport argparse\n\n\n# Parameters\n# ==================================================\n\n# Data Parameters\ntf.flags.DEFINE_string(\"positive_data_file\", \"./data/rt-polaritydata/rt-polarity.pos\", \"Data source for the positive data.\")\ntf.flags.DEFINE_string(\"negative_data_file\", \"./data/rt-polaritydata/rt-polarity.neg\", \"Data source for the negative data.\")\n\n# Data loading params for nsmc only\ntf.flags.DEFINE_string(\"pos_test_data_file\", \"./data/nsmc/test/toy_nsmc_pos_test.txt\", \"Data source for the positive test data.\")\ntf.flags.DEFINE_string(\"neg_test_data_file\", \"./data/nsmc/test/toy_nsmc_neg_test.txt\", \"Data source for the negative test data.\")\n\n# Eval Parameters\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\n\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\n\ndef main(is_baseline, checkpoint_dir, pos_test_file, neg_test_file, prediction_file, performance_file):\n FLAGS = tf.flags.FLAGS\n\n # CHANGE THIS: Load data. Load your own data here\n if is_baseline:\n x_raw, y_test = data_helpers_nsmc.load_nsmc_test_data_and_labels_baseline(pos_test_file, neg_test_file)\n else:\n x_raw, y_test = data_helpers_nsmc.load_nsmc_test_data_and_labels_test(pos_test_file, neg_test_file)\n y_test = np.argmax(y_test, axis=1)\n\n # Map data into vocabulary\n vocab_path = os.path.join(checkpoint_dir, \"..\", \"vocab\")\n vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)\n x_test = np.array(list(vocab_processor.transform(x_raw)))\n\n print(\"\\nEvaluating...\\n\")\n\n # Evaluation\n # ==================================================\n checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)\n graph = tf.Graph()\n with graph.as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n with tf.device('/device:GPU:0'):\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n\n # Get the placeholders from the graph by name\n input_x = graph.get_operation_by_name(\"input_x\").outputs[0]\n # input_y = graph.get_operation_by_name(\"input_y\").outputs[0]\n dropout_keep_prob = graph.get_operation_by_name(\"dropout_keep_prob\").outputs[0]\n\n # Tensors we want to evaluate\n predictions = graph.get_operation_by_name(\"output/predictions\").outputs[0]\n\n # Generate batches for one epoch\n batches = data_helpers_nsmc.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)\n\n # Collect the predictions here\n all_predictions = []\n\n for x_test_batch in batches:\n batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})\n all_predictions = np.concatenate([all_predictions, batch_predictions])\n\n # Save the evaluation to a csv\n predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))\n out_path = os.path.join(checkpoint_dir, \"..\", prediction_file)#\"prediction.csv\")\n print(\"Saving evaluation to 
{0}\".format(out_path))\n with open(out_path, 'w') as f:\n csv.writer(f).writerows(predictions_human_readable)\n\n # Print accuracy if y_test is defined\n if y_test is not None:\n correct_predictions = float(sum(all_predictions == y_test))\n perf_file_path = os.path.join(checkpoint_dir, \"..\", performance_file)#\"performance.txt\")\n with open(perf_file_path, 'wt') as outf:\n outf.write(\"Total number of test examples: {}\\n\".format(len(y_test)))\n outf.write(\"Accuracy: {:g}\\n\".format(correct_predictions/float(len(y_test))))\n\nif __name__ == \"__main__\":\n print(\"[\" + __file__ + \"] main invoked.\")\n\n AP = argparse.ArgumentParser(description=\"args parser\")\n AP.add_argument(\"-is_baseline\", action=\"store\", required=True,\n help=\"1 if it's for baseline model or 0 for test model\")\n AP.add_argument(\"-checkpoint_dir\", action=\"store\", required=True,\n help=\"checkpoint path to store the model files in\")\n AP.add_argument(\"-pos_test_file\", action=\"store\", required=True,\n help=\"data source for the positive test data\")\n AP.add_argument(\"-neg_test_file\", action=\"store\", required=True,\n help=\"data source for the negative test data\")\n AP.add_argument(\"-prediction_file\", action=\"store\", required=True,\n help=\"prediction file name\")\n AP.add_argument(\"-performance_file\", action=\"store\", required=True,\n help=\"performance file name\")\n ARGS = AP.parse_args()\n\n main(bool(int(ARGS.is_baseline)), ARGS.checkpoint_dir,\n ARGS.pos_test_file, ARGS.neg_test_file, ARGS.prediction_file, ARGS.performance_file)\n", "sub_path": "eval.py", "file_name": "eval.py", "file_ext": "py", "file_size_in_byte": 5465, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "tensorflow.flags.DEFINE_string", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_string", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_string", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_string", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_integer", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_boolean", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.flags.DEFINE_boolean", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.flags", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.flags", "line_number": 36, "usage_type": "attribute"}, {"api_name": "data_helpers_nsmc.load_nsmc_test_data_and_labels_baseline", "line_number": 40, "usage_type": "call"}, {"api_name": "data_helpers_nsmc.load_nsmc_test_data_and_labels_test", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": 
"tensorflow.contrib.learn.preprocessing.VocabularyProcessor.restore", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.contrib.learn.preprocessing", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.learn", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.Graph", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.device", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.train.import_meta_graph", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 64, "usage_type": "attribute"}, {"api_name": "data_helpers_nsmc.batch_iter", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "534724024", "text": "import contextlib\nimport datetime\nfrom unittest import mock\n\nimport pytest\nimport pytest_twisted\nimport pytz\nfrom pyVmomi import vim, vmodl\nfrom twisted.internet import defer\nfrom twisted.web.server import NOT_DONE_YET\n\nfrom vmware_exporter.vmware_exporter import main, HealthzResource, VmwareCollector, VMWareMetricsResource\n\n\nEPOCH = datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)\n\n\ndef _check_properties(properties):\n ''' This will run the prop list through the pyvmomi serializer and catch malformed types (not values) '''\n PropertyCollector = vmodl.query.PropertyCollector\n property_spec = PropertyCollector.PropertySpec()\n property_spec.pathSet = properties\n return len(property_spec.pathSet) > 0\n\n\n@mock.patch('vmware_exporter.vmware_exporter.batch_fetch_properties')\n@pytest_twisted.inlineCallbacks\ndef test_collect_vms(batch_fetch_properties):\n content = mock.Mock()\n\n boot_time = EPOCH + datetime.timedelta(seconds=60)\n\n snapshot_1 = mock.Mock()\n snapshot_1.createTime = EPOCH + datetime.timedelta(seconds=60)\n snapshot_1.name = 'snapshot_1'\n snapshot_1.childSnapshotList = []\n\n snapshot_2 = mock.Mock()\n snapshot_2.createTime = EPOCH + datetime.timedelta(seconds=120)\n snapshot_2.name = 'snapshot_2'\n snapshot_2.childSnapshotList = [snapshot_1]\n\n snapshot = mock.Mock()\n snapshot.rootSnapshotList = [snapshot_2]\n\n disk = mock.Mock()\n disk.diskPath = '/boot'\n disk.capacity = 100\n disk.freeSpace = 50\n\n batch_fetch_properties.return_value = {\n 'vm-1': {\n 'name': 'vm-1',\n 'runtime.host': vim.ManagedObject('host-1'),\n 'runtime.powerState': 'poweredOn',\n 'summary.config.numCpu': 1,\n 'runtime.bootTime': boot_time,\n 'snapshot': snapshot,\n 'guest.disk': [disk],\n }\n }\n\n 
collect_only = {\n 'vms': True,\n 'vmguests': True,\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n }\n collector = VmwareCollector(\n '127.0.0.1',\n 'root',\n 'password',\n collect_only,\n )\n\n inventory = {\n 'host-1': {\n 'name': 'host-1',\n 'dc': 'dc',\n 'cluster': 'cluster-1',\n }\n }\n\n metrics = collector._create_metric_containers()\n\n collector._labels = {}\n\n with mock.patch.object(collector, '_vmware_get_vm_perf_manager_metrics'):\n yield collector._vmware_get_vms(content, metrics, inventory)\n\n assert _check_properties(batch_fetch_properties.call_args[0][2])\n\n # General VM metrics\n assert metrics['vmware_vm_power_state'].samples[0][1] == {\n 'vm_name': 'vm-1',\n 'host_name': 'host-1',\n 'cluster_name': 'cluster-1',\n 'dc_name': 'dc',\n }\n assert metrics['vmware_vm_power_state'].samples[0][2] == 1.0\n\n assert metrics['vmware_vm_boot_timestamp_seconds'].samples[0][1] == {\n 'vm_name': 'vm-1',\n 'host_name': 'host-1',\n 'cluster_name': 'cluster-1',\n 'dc_name': 'dc',\n }\n assert metrics['vmware_vm_boot_timestamp_seconds'].samples[0][2] == 60\n\n # Disk info (vmguest)\n assert metrics['vmware_vm_guest_disk_capacity'].samples[0][1] == {\n 'vm_name': 'vm-1',\n 'host_name': 'host-1',\n 'cluster_name': 'cluster-1',\n 'dc_name': 'dc',\n 'partition': '/boot',\n }\n assert metrics['vmware_vm_guest_disk_capacity'].samples[0][2] == 100\n\n # Snapshots\n assert metrics['vmware_vm_snapshots'].samples[0][1] == {\n 'vm_name': 'vm-1',\n 'host_name': 'host-1',\n 'cluster_name': 'cluster-1',\n 'dc_name': 'dc',\n }\n assert metrics['vmware_vm_snapshots'].samples[0][2] == 2\n\n assert metrics['vmware_vm_snapshot_timestamp_seconds'].samples[0][1] == {\n 'vm_name': 'vm-1',\n 'host_name': 'host-1',\n 'cluster_name': 'cluster-1',\n 'dc_name': 'dc',\n 'vm_snapshot_name': 'snapshot_2',\n }\n assert metrics['vmware_vm_snapshot_timestamp_seconds'].samples[0][2] == 120\n\n assert metrics['vmware_vm_snapshot_timestamp_seconds'].samples[1][1] == {\n 'vm_name': 'vm-1',\n 'host_name': 'host-1',\n 'cluster_name': 'cluster-1',\n 'dc_name': 'dc',\n 'vm_snapshot_name': 'snapshot_1',\n }\n assert metrics['vmware_vm_snapshot_timestamp_seconds'].samples[1][2] == 60\n\n\n@pytest_twisted.inlineCallbacks\ndef test_collect_vm_perf():\n content = mock.Mock()\n\n collect_only = {\n 'vms': True,\n 'vmguests': True,\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n }\n collector = VmwareCollector(\n '127.0.0.1',\n 'root',\n 'password',\n collect_only,\n )\n\n inventory = {\n 'host-1': {\n 'name': 'host-1',\n 'dc': 'dc',\n 'cluster': 'cluster-1',\n }\n }\n\n metrics = collector._create_metric_containers()\n\n collector._labels = {'vm:1': ['vm-1', 'host-1', 'dc', 'cluster-1']}\n\n vms = {\n 'vm-1': {\n 'name': 'vm-1',\n 'obj': vim.ManagedObject('vm-1'),\n 'runtime.powerState': 'poweredOn',\n },\n 'vm-2': {\n 'name': 'vm-2',\n 'obj': vim.ManagedObject('vm-2'),\n 'runtime.powerState': 'poweredOff',\n }\n }\n\n metric_1 = mock.Mock()\n metric_1.id.counterId = 9\n metric_1.value = [9]\n\n metric_2 = mock.Mock()\n metric_2.id.counterId = 1\n metric_2.value = [1]\n\n ent_1 = mock.Mock()\n ent_1.value = [metric_1, metric_2]\n ent_1.entity = vim.ManagedObject('vm:1')\n\n content.perfManager.QueryStats.return_value = [ent_1]\n\n with mock.patch.object(collector, '_vmware_perf_metrics') as _vmware_perf_metrics:\n _vmware_perf_metrics.return_value = {\n 'cpu.ready.summation': 1,\n 'cpu.usage.average': 2,\n 'cpu.usagemhz.average': 3,\n 'disk.usage.average': 4,\n 'disk.read.average': 5,\n 
'disk.write.average': 6,\n 'mem.usage.average': 7,\n 'net.received.average': 8,\n 'net.transmitted.average': 9,\n }\n\n yield collector._vmware_get_vm_perf_manager_metrics(content, vms, metrics, inventory)\n\n # General VM metrics\n assert metrics['vmware_vm_net_transmitted_average'].samples[0][1] == {\n 'vm_name': 'vm-1',\n 'host_name': 'host-1',\n 'cluster_name': 'cluster-1',\n 'dc_name': 'dc',\n }\n assert metrics['vmware_vm_net_transmitted_average'].samples[0][2] == 9.0\n\n\n@mock.patch('vmware_exporter.vmware_exporter.batch_fetch_properties')\ndef test_collect_hosts(batch_fetch_properties):\n content = mock.Mock()\n\n boot_time = EPOCH + datetime.timedelta(seconds=60)\n\n batch_fetch_properties.return_value = {\n 'host-1': {\n 'id': 'host:1',\n 'name': 'host-1',\n 'runtime.powerState': 'poweredOn',\n 'runtime.bootTime': boot_time,\n 'runtime.connectionState': 'connected',\n 'runtime.inMaintenanceMode': True,\n 'summary.quickStats.overallCpuUsage': 100,\n 'summary.hardware.numCpuCores': 12,\n 'summary.hardware.cpuMhz': 1000,\n 'summary.quickStats.overallMemoryUsage': 1024,\n 'summary.hardware.memorySize': 2048 * 1024 * 1024,\n },\n 'host-2': {\n 'id': 'host:2',\n 'name': 'host-2',\n 'runtime.powerState': 'poweredOff',\n }\n }\n\n collect_only = {\n 'vms': True,\n 'vmguests': True,\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n }\n collector = VmwareCollector(\n '127.0.0.1',\n 'root',\n 'password',\n collect_only,\n )\n\n inventory = {\n 'host:1': {\n 'dc': 'dc',\n 'cluster': 'cluster',\n },\n 'host:2': {\n 'dc': 'dc',\n 'cluster': 'cluster',\n }\n }\n\n metrics = collector._create_metric_containers()\n collector._vmware_get_hosts(content, metrics, inventory)\n\n assert _check_properties(batch_fetch_properties.call_args[0][2])\n\n assert metrics['vmware_host_memory_max'].samples[0][1] == {\n 'host_name': 'host-1',\n 'dc_name': 'dc',\n 'cluster_name': 'cluster'\n }\n assert metrics['vmware_host_memory_max'].samples[0][2] == 2048\n\n # In our test data we hava a host that is powered down - we should have its\n # power_state metric but not any others.\n assert len(metrics['vmware_host_power_state'].samples) == 2\n assert len(metrics['vmware_host_memory_max'].samples) == 1\n\n\n@mock.patch('vmware_exporter.vmware_exporter.batch_fetch_properties')\ndef test_collect_datastore(batch_fetch_properties):\n content = mock.Mock()\n\n batch_fetch_properties.return_value = {\n 'datastore-1': {\n 'name': 'datastore-1',\n 'summary.capacity': 0,\n 'summary.freeSpace': 0,\n 'host': ['host-1'],\n 'vm': ['vm-1'],\n 'summary.accessible': True,\n 'summary.maintenanceMode': 'normal',\n }\n }\n\n collect_only = {\n 'vms': True,\n 'vmguests': True,\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n }\n collector = VmwareCollector(\n '127.0.0.1',\n 'root',\n 'password',\n collect_only,\n )\n\n inventory = {\n 'datastore-1': {\n 'dc': 'dc',\n 'ds_cluster': 'ds_cluster',\n }\n }\n\n metrics = collector._create_metric_containers()\n collector._vmware_get_datastores(content, metrics, inventory)\n\n assert _check_properties(batch_fetch_properties.call_args[0][2])\n\n assert metrics['vmware_datastore_capacity_size'].samples[0][1] == {\n 'ds_name': 'datastore-1',\n 'dc_name': 'dc',\n 'ds_cluster': 'ds_cluster'\n }\n assert metrics['vmware_datastore_capacity_size'].samples[0][2] == 0.0\n\n assert metrics['vmware_datastore_maintenance_mode'].samples[0][1] == {\n 'ds_name': 'datastore-1',\n 'dc_name': 'dc',\n 'ds_cluster': 'ds_cluster',\n 'mode': 'normal'\n }\n assert 
metrics['vmware_datastore_maintenance_mode'].samples[0][2] == 1.0\n\n assert metrics['vmware_datastore_accessible'].samples[0][1] == {\n 'ds_name': 'datastore-1',\n 'dc_name': 'dc',\n 'ds_cluster': 'ds_cluster'\n }\n assert metrics['vmware_datastore_accessible'].samples[0][2] == 1.0\n\n\n@pytest_twisted.inlineCallbacks\ndef test_collect():\n collect_only = {\n 'vms': True,\n 'vmguests': True,\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n }\n collector = VmwareCollector(\n '127.0.0.1',\n 'root',\n 'password',\n collect_only,\n ignore_ssl=True,\n )\n\n with contextlib.ExitStack() as stack:\n stack.enter_context(mock.patch.object(collector, '_vmware_connect'))\n get_inventory = stack.enter_context(mock.patch.object(collector, '_vmware_get_inventory'))\n get_inventory.return_value = ([], [])\n stack.enter_context(mock.patch.object(collector, '_vmware_get_vms')).return_value = defer.succeed(None)\n stack.enter_context(mock.patch.object(collector, '_vmware_get_datastores'))\n stack.enter_context(mock.patch.object(collector, '_vmware_get_hosts'))\n stack.enter_context(mock.patch.object(collector, '_vmware_disconnect'))\n metrics = yield collector.collect()\n\n assert metrics[0].name == 'vmware_vm_power_state'\n assert metrics[-1].name == 'vmware_vm_snapshot_timestamp_seconds'\n\n\n@pytest_twisted.inlineCallbacks\ndef test_collect_deferred_error_works():\n collect_only = {\n 'vms': True,\n 'vmguests': True,\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n }\n collector = VmwareCollector(\n '127.0.0.1',\n 'root',\n 'password',\n collect_only,\n ignore_ssl=True,\n )\n\n @defer.inlineCallbacks\n def _fake_get_vms(*args, **kwargs):\n yield None\n raise RuntimeError('An error has occurred')\n\n with contextlib.ExitStack() as stack:\n stack.enter_context(mock.patch.object(collector, '_vmware_connect'))\n get_inventory = stack.enter_context(mock.patch.object(collector, '_vmware_get_inventory'))\n get_inventory.return_value = ([], [])\n stack.enter_context(mock.patch.object(collector, '_vmware_get_vms')).side_effect = _fake_get_vms\n stack.enter_context(mock.patch.object(collector, '_vmware_get_datastores'))\n stack.enter_context(mock.patch.object(collector, '_vmware_get_hosts'))\n stack.enter_context(mock.patch.object(collector, '_vmware_disconnect'))\n\n with pytest.raises(defer.FirstError):\n yield collector.collect()\n\n\ndef test_vmware_get_inventory():\n content = mock.Mock()\n\n # Compute case 1\n host_1 = mock.Mock()\n host_1._moId = 'host:1'\n host_1.name = 'host-1'\n\n folder_1 = mock.Mock()\n folder_1.host = [host_1]\n\n # Computer case 2\n host_2 = mock.Mock()\n host_2._moId = 'host:2'\n host_2.name = 'host-2'\n host_2.summary.config.name = 'host-2.'\n\n folder_2 = vim.ClusterComputeResource('computer-cluster:1')\n folder_2.__dict__['name'] = 'compute-cluster-1'\n folder_2.__dict__['host'] = [host_2]\n\n # Datastore case 1\n datastore_1 = vim.Datastore('datastore:1')\n datastore_1.__dict__['name'] = 'datastore-1'\n\n # Datastore case 2\n datastore_2 = vim.Datastore('datastore:2')\n datastore_2.__dict__['name'] = 'datastore-2'\n\n datastore_2_folder = mock.Mock()\n datastore_2_folder.childEntity = [datastore_2]\n datastore_2_folder.name = 'datastore2-folder'\n\n data_center_1 = mock.Mock()\n data_center_1.name = 'dc-1'\n data_center_1.hostFolder.childEntity = [folder_1, folder_2]\n data_center_1.datastoreFolder.childEntity = [datastore_1, datastore_2_folder]\n\n content.rootFolder.childEntity = [data_center_1]\n\n collect_only = {\n 'vms': True,\n 'vmguests': 
True,\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n }\n collector = VmwareCollector(\n '127.0.0.1',\n 'root',\n 'password',\n collect_only,\n ignore_ssl=True,\n )\n\n with contextlib.ExitStack() as stack:\n # We have to disable the LazyObject magic on pyvmomi classes so that we can use them as fakes\n stack.enter_context(mock.patch.object(vim.ClusterComputeResource, 'name', None))\n stack.enter_context(mock.patch.object(vim.ClusterComputeResource, 'host', None))\n stack.enter_context(mock.patch.object(vim.Datastore, 'name', None))\n\n host, ds = collector._vmware_get_inventory(content)\n\n assert host == {\n 'host:1': {\n 'name': 'host-1',\n 'dc': 'dc-1',\n 'cluster': '',\n },\n 'host:2': {\n 'name': 'host-2',\n 'dc': 'dc-1',\n 'cluster': 'compute-cluster-1',\n }\n }\n\n assert ds == {\n 'datastore-1': {\n 'dc': 'dc-1',\n 'ds_cluster': '',\n },\n 'datastore-2': {\n 'dc': 'dc-1',\n 'ds_cluster': 'datastore2-folder',\n }\n }\n\n\ndef test_vmware_connect():\n collect_only = {\n 'vms': True,\n 'vmguests': True,\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n }\n collector = VmwareCollector(\n '127.0.0.1',\n 'root',\n 'password',\n collect_only,\n ignore_ssl=True,\n )\n\n with mock.patch('vmware_exporter.vmware_exporter.connect') as connect:\n collector._vmware_connect()\n\n call_kwargs = connect.SmartConnect.call_args[1]\n assert call_kwargs['host'] == '127.0.0.1'\n assert call_kwargs['user'] == 'root'\n assert call_kwargs['pwd'] == 'password'\n assert call_kwargs['sslContext'] is not None\n\n\ndef test_vmware_disconnect():\n collect_only = {\n 'vms': True,\n 'vmguests': True,\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n }\n collector = VmwareCollector(\n '127.0.0.1',\n 'root',\n 'password',\n collect_only,\n )\n\n # Mock that we have a connection\n connection = object()\n collector.vmware_connection = connection\n\n with mock.patch('vmware_exporter.vmware_exporter.connect') as connect:\n collector._vmware_disconnect()\n connect.Disconnect.assert_called_with(connection)\n\n\ndef test_vmware_perf_metrics():\n counter = mock.Mock()\n counter.groupInfo.key = 'a'\n counter.nameInfo.key = 'b'\n counter.rollupType = 'c'\n counter.key = 1\n\n content = mock.Mock()\n content.perfManager.perfCounter = [counter]\n\n collect_only = {\n 'vms': True,\n 'vmguests': True,\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n }\n collector = VmwareCollector(\n '127.0.0.1',\n 'root',\n 'password',\n collect_only,\n )\n\n result = collector._vmware_perf_metrics(content)\n\n assert result == {'a.b.c': 1}\n\n\ndef test_healthz():\n request = mock.Mock()\n\n resource = HealthzResource()\n response = resource.render_GET(request)\n\n request.setResponseCode.assert_called_with(200)\n\n assert response == b'Server is UP'\n\n\ndef test_vmware_resource():\n request = mock.Mock()\n\n args = mock.Mock()\n args.config_file = None\n\n resource = VMWareMetricsResource(args)\n\n with mock.patch.object(resource, '_async_render_GET') as _async_render_GET:\n assert resource.render_GET(request) == NOT_DONE_YET\n _async_render_GET.assert_called_with(request)\n\n\n@pytest_twisted.inlineCallbacks\ndef test_vmware_resource_async_render_GET():\n request = mock.Mock()\n request.args = {\n b'vsphere_host': [b'127.0.0.1'],\n }\n\n args = mock.Mock()\n args.config_file = None\n\n resource = VMWareMetricsResource(args)\n\n with mock.patch('vmware_exporter.vmware_exporter.VmwareCollector') as Collector:\n Collector.return_value.collect.return_value = []\n yield 
resource._async_render_GET(request)\n\n request.setResponseCode.assert_called_with(200)\n request.write.assert_called_with(b'')\n request.finish.assert_called_with()\n\n\n@pytest_twisted.inlineCallbacks\ndef test_vmware_resource_async_render_GET_errback():\n request = mock.Mock()\n request.args = {\n b'vsphere_host': [b'127.0.0.1'],\n }\n\n args = mock.Mock()\n args.config_file = None\n\n resource = VMWareMetricsResource(args)\n\n with mock.patch('vmware_exporter.vmware_exporter.VmwareCollector') as Collector:\n Collector.return_value.collect.side_effect = RuntimeError('Test exception')\n yield resource._async_render_GET(request)\n\n request.setResponseCode.assert_called_with(500)\n request.write.assert_called_with(b'# Collection failed')\n request.finish.assert_called_with()\n\n\n@pytest_twisted.inlineCallbacks\ndef test_vmware_resource_async_render_GET_no_target():\n request = mock.Mock()\n request.args = {\n }\n\n args = mock.Mock()\n args.config_file = None\n\n resource = VMWareMetricsResource(args)\n\n with mock.patch('vmware_exporter.vmware_exporter.VmwareCollector'):\n yield resource._async_render_GET(request)\n\n request.setResponseCode.assert_called_with(500)\n request.write.assert_called_with(b'No vsphere_host or target defined!\\n')\n request.finish.assert_called_with()\n\n\ndef test_config_env_multiple_sections():\n env = {\n 'VSPHERE_HOST': '127.0.0.10',\n 'VSPHERE_USER': 'username1',\n 'VSPHERE_PASSWORD': 'password1',\n 'VSPHERE_MYSECTION_HOST': '127.0.0.11',\n 'VSPHERE_MYSECTION_USER': 'username2',\n 'VSPHERE_MYSECTION_PASSWORD': 'password2',\n }\n\n args = mock.Mock()\n args.config_file = None\n\n with mock.patch('vmware_exporter.vmware_exporter.os.environ', env):\n resource = VMWareMetricsResource(args)\n\n assert resource.config == {\n 'default': {\n 'ignore_ssl': False,\n 'vsphere_host': '127.0.0.10',\n 'vsphere_user': 'username1',\n 'vsphere_password': 'password1',\n 'collect_only': {\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n 'vmguests': True,\n 'vms': True,\n }\n },\n 'mysection': {\n 'ignore_ssl': False,\n 'vsphere_host': '127.0.0.11',\n 'vsphere_user': 'username2',\n 'vsphere_password': 'password2',\n 'collect_only': {\n 'datastores': True,\n 'hosts': True,\n 'snapshots': True,\n 'vmguests': True,\n 'vms': True,\n }\n }\n }\n\n\ndef test_main():\n with pytest.raises(SystemExit):\n main(['-h'])\n", "sub_path": "tests/unit/test_vmware_exporter.py", "file_name": "test_vmware_exporter.py", "file_ext": "py", "file_size_in_byte": 20843, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "datetime.datetime", "line_number": 15, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pyVmomi.vmodl.query", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pyVmomi.vmodl", "line_number": 20, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 29, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 29, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 31, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 33, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 33, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 34, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 38, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 38, "usage_type": "name"}, {"api_name": 
"datetime.timedelta", "line_number": 39, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 43, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 43, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 46, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 46, "usage_type": "name"}, {"api_name": "pyVmomi.vim.ManagedObject", "line_number": 54, "usage_type": "call"}, {"api_name": "pyVmomi.vim", "line_number": 54, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VmwareCollector", "line_number": 70, "usage_type": "call"}, {"api_name": "unittest.mock.patch.object", "line_number": 89, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 89, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 89, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 26, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 26, "usage_type": "name"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 27, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 151, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 151, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VmwareCollector", "line_number": 160, "usage_type": "call"}, {"api_name": "pyVmomi.vim.ManagedObject", "line_number": 182, "usage_type": "call"}, {"api_name": "pyVmomi.vim", "line_number": 182, "usage_type": "name"}, {"api_name": "pyVmomi.vim.ManagedObject", "line_number": 187, "usage_type": "call"}, {"api_name": "pyVmomi.vim", "line_number": 187, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 192, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 192, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 196, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 196, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 200, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 200, "usage_type": "name"}, {"api_name": "pyVmomi.vim.ManagedObject", "line_number": 202, "usage_type": "call"}, {"api_name": "pyVmomi.vim", "line_number": 202, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 206, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 206, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 206, "usage_type": "name"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 149, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 233, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 233, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 235, "usage_type": "call"}, {"api_name": "vmware_exporter.vmware_exporter.VmwareCollector", "line_number": 265, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 231, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 231, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 303, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 303, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VmwareCollector", "line_number": 324, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 301, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 301, "usage_type": "name"}, {"api_name": 
"vmware_exporter.vmware_exporter.VmwareCollector", "line_number": 375, "usage_type": "call"}, {"api_name": "contextlib.ExitStack", "line_number": 383, "usage_type": "call"}, {"api_name": "unittest.mock.patch.object", "line_number": 384, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 384, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 384, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 385, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 385, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 385, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 387, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 387, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 387, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 387, "usage_type": "call"}, {"api_name": "twisted.internet.defer", "line_number": 387, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 388, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 388, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 388, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 389, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 389, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 389, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 390, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 390, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 390, "usage_type": "name"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 366, "usage_type": "attribute"}, {"api_name": "vmware_exporter.vmware_exporter.VmwareCollector", "line_number": 406, "usage_type": "call"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 414, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 414, "usage_type": "name"}, {"api_name": "contextlib.ExitStack", "line_number": 419, "usage_type": "call"}, {"api_name": "unittest.mock.patch.object", "line_number": 420, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 420, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 420, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 421, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 421, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 421, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 423, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 423, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 423, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 424, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 424, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 424, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 425, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 425, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 425, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 426, 
"usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 426, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 426, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 428, "usage_type": "call"}, {"api_name": "twisted.internet.defer.FirstError", "line_number": 428, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 428, "usage_type": "name"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 397, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 433, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 433, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 436, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 436, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 440, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 440, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 444, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 444, "usage_type": "name"}, {"api_name": "pyVmomi.vim.ClusterComputeResource", "line_number": 449, "usage_type": "call"}, {"api_name": "pyVmomi.vim", "line_number": 449, "usage_type": "name"}, {"api_name": "pyVmomi.vim.Datastore", "line_number": 454, "usage_type": "call"}, {"api_name": "pyVmomi.vim", "line_number": 454, "usage_type": "name"}, {"api_name": "pyVmomi.vim.Datastore", "line_number": 458, "usage_type": "call"}, {"api_name": "pyVmomi.vim", "line_number": 458, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 461, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 461, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 465, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 465, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VmwareCollector", "line_number": 479, "usage_type": "call"}, {"api_name": "contextlib.ExitStack", "line_number": 487, "usage_type": "call"}, {"api_name": "unittest.mock.patch.object", "line_number": 489, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 489, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 489, "usage_type": "name"}, {"api_name": "pyVmomi.vim.ClusterComputeResource", "line_number": 489, "usage_type": "attribute"}, {"api_name": "pyVmomi.vim", "line_number": 489, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 490, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 490, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 490, "usage_type": "name"}, {"api_name": "pyVmomi.vim.ClusterComputeResource", "line_number": 490, "usage_type": "attribute"}, {"api_name": "pyVmomi.vim", "line_number": 490, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 491, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 491, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 491, "usage_type": "name"}, {"api_name": "pyVmomi.vim.Datastore", "line_number": 491, "usage_type": "attribute"}, {"api_name": "pyVmomi.vim", "line_number": 491, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VmwareCollector", "line_number": 528, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 536, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 536, 
"usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VmwareCollector", "line_number": 554, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 565, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 565, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 571, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 571, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 577, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 577, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VmwareCollector", "line_number": 587, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 600, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 600, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.HealthzResource", "line_number": 602, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 611, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 611, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 613, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 613, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VMWareMetricsResource", "line_number": 616, "usage_type": "call"}, {"api_name": "unittest.mock.patch.object", "line_number": 618, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 618, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 618, "usage_type": "name"}, {"api_name": "twisted.web.server.NOT_DONE_YET", "line_number": 619, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 625, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 625, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 630, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 630, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VMWareMetricsResource", "line_number": 633, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 635, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 635, "usage_type": "name"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 623, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 646, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 646, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 651, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 651, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VMWareMetricsResource", "line_number": 654, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 656, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 656, "usage_type": "name"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 644, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 667, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 667, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 671, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 671, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VMWareMetricsResource", "line_number": 674, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 676, "usage_type": "call"}, {"api_name": "unittest.mock", 
"line_number": 676, "usage_type": "name"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 665, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 694, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 694, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 697, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 697, "usage_type": "name"}, {"api_name": "vmware_exporter.vmware_exporter.VMWareMetricsResource", "line_number": 698, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 731, "usage_type": "call"}, {"api_name": "vmware_exporter.vmware_exporter.main", "line_number": 732, "usage_type": "call"}]} +{"seq_id": "434708338", "text": "# -*- coding: UTF-8 -*-\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport win32gui\nimport win32con\nimport requests\n# import public.public_parm as public_parm\nimport time\n\n\nclass UploadFile():\n option = webdriver.ChromeOptions()\n # option.add_argument('headless') # 设置option\n option.add_argument('-start-maximized')\n driver = webdriver.Chrome(chrome_options=option) # 调用带参数的谷歌浏览器\n\n # driver = webdriver.Chrome()\n ip = \"192.168.4.46\"\n port = \"8310\"\n cookieValue = ''\n user = \"test\"\n psd = \"icenter\"\n\n def __init__(self, ip, port, uesrName, password):\n self.ip = ip\n self.port = port\n self.user = uesrName\n self.psd = password\n\n def upload(self, params, startTime):\n self.driver.get(\"http://\"+self.ip+\":\"+self.port)\n # 免密码登录\n # self.driver.maximize_window()\n # self.driver.delete_all_cookies()\n # cookie = {'domain': self.ip, 'httpOnly': False, 'name': 'Authorization', 'path': '/', 'secure': False, 'value': self.cookieValue }\n # self.driver.add_cookie(cookie)\n # self.driver.refresh()\n # 登录\n time.sleep(5)\n loginForms = self.driver.find_elements_by_class_name(\"login-form-panel\")\n loginForm = \"\"\n for item in loginForms:\n if item.is_displayed():\n loginForm = item\n userNameInput = loginForm.find_elements_by_class_name(\"el-input__inner\")\n for item in userNameInput:\n if \"请输入用户名\" == item.get_property(\"placeholder\"):\n item.clear()\n item.send_keys(self.user)\n elif \"请输入密码\" == item.get_property(\"placeholder\"):\n item.send_keys(self.psd)\n loginButton = loginForm.find_element_by_tag_name(\"button\")\n loginButton.click()\n time.sleep(5)\n #进入数据管理页面\n mainPage = None\n time.sleep(5)\n for waitTime in range(0, 10):\n try:\n mainPage = self.driver.find_element_by_class_name(\"full-container\")\n break\n except NoSuchElementException:\n time.sleep(1)\n if not mainPage:\n self.driver.quit()\n return\n navwrap = self.driver.find_element_by_class_name(\"nav-wrap\")\n menuList = navwrap.find_elements_by_tag_name(\"a\")\n chengedPage = False\n for menu in menuList:\n if \"数据管理\" == menu.text:\n menu.click()\n chengedPage = True\n time.sleep(3)\n break\n if not chengedPage:\n print(\"未找到数据管理菜单\")\n self.driver.quit()\n return\n # 选择要上传到的文件夹\n folderFilterTree = None\n for waitTime in range(0,10):\n try:\n folderFilterTree = self.driver.find_element_by_id(\"filterTree\")\n break\n except NoSuchElementException:\n time.sleep(1)\n # 点击上传\n operbtns =self.driver.find_element_by_class_name(\"oper-btns\")\n uploadButton = operbtns.find_element_by_class_name(\"el-dropdown\")\n for param in params:\n respons = requests.request(method=\"GET\", url=\"http://192.168.4.46:8310/user/jwt/time\")\n while int(startTime) > int(respons.text):\n respons = 
requests.request(method=\"GET\", url=\"http://192.168.4.46:8310/user/jwt/time\")\n if int(startTime) == int(respons.text):\n break\n uploadButton.click()\n for i in range(0, 5):\n time.sleep(2)\n try:\n dropdownmenu = self.driver.find_elements_by_class_name(\"el-dropdown-menu\")\n liList = dropdownmenu[len(dropdownmenu)-1].find_elements_by_tag_name(\"li\")\n findTagFileType = False\n for li in liList:\n if param[\"fileType\"] == li.get_property(\"title\"):\n li.click()\n findTagFileType = True\n break\n if findTagFileType:\n time.sleep(3)\n dialog = win32gui.FindWindow('#32770', u'打开') # 找到windows对话框参数是(className,title)\n ComboBoxEx32 = win32gui.FindWindowEx(dialog, 0, 'ComboBoxEx32', None)\n ComboBox = win32gui.FindWindowEx(ComboBoxEx32, 0, 'ComboBox', None)\n Edit = win32gui.FindWindowEx(ComboBox, 0, 'Edit', None)\n # 上面3句依次找对象,直到找出输入框Edit对象的句柄\n button = win32gui.FindWindowEx(dialog, 0, 'Button', None) # 确定按钮\n win32gui.SendMessage(Edit, win32con.WM_SETTEXT, 0, param[\"filepath\"])\n win32gui.SendMessage(dialog, win32con.WM_COMMAND, 1, button)\n time.sleep(5)\n break\n else:\n print(\"未找到【\"+param[\"fileType\"]+\"】类型文件上传按钮\")\n break\n except NoSuchElementException:\n print(\"未找到【\" + param[\"fileType\"] + \"】类型文件上传按钮1\")\n time.sleep(1)\n uploadButton.click()\n # dropdownElements = self.driver.find_elements_by_class_name(\"el-dropdown\")\n # for dropdown in dropdownElements:\n # if self.user == dropdown.text:\n # dropdown.click()\n # break\n # time.sleep(1)\n # self.driver.find_element_by_class_name(\"fa-sign-out\").click()\n # time.sleep(1)\n # self.driver.delete_all_cookies()\n # self.driver.quit()\n\nfrom threading import Lock\nfrom concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED\ndef openChrome(url):\n driver = webdriver.Chrome()\n driver.get(url)\n\n\nif __name__ == '__main__':\n t = [{\"fileType\": \"原始影像\", \"filepath\": \"C:\\\\Users\\\\admin\\\\Desktop\\\\GF3_MDJ_NSC_009498_E121.2_N34.1_20180530_L1A_HHHV_L10003226453.tar.gz\"},\n # {\"fileType\": \"成果影像\", \"filepath\": \"\\\\\\\\192.168.49.40\\\\测试部共享\\\\测试数据\\\\iCenter1.1\\\\maoyanData\\\\其它数据\\\\成果影像\\\\ImageMosic.tiff\"},\n # {\"fileType\": \"矢量\", \"filepath\": \"\\\\\\\\192.168.49.40\\\\测试部共享\\\\测试数据\\\\iCenter1.1\\\\maoyanData\\\\其它数据\\\\矢量\\\\prov.zip\"},\n # {\"fileType\": \"地形\", \"filepath\": \"\\\\\\\\192.168.49.40\\\\测试部共享\\\\测试数据\\\\iCenter1.1\\\\maoyanData\\\\其它数据\\\\地形\\\\GlobalDEM1Km.tif\"},\n # {\"fileType\": \"文档\", \"filepath\": \"\\\\\\\\192.168.49.40\\\\测试部共享\\\\测试数据\\\\iCenter1.1\\\\maoyanData\\\\其它数据\\\\图片\\\\ISO9126.png\"},\n # {\"fileType\": \"图片\", \"filepath\": \"\\\\\\\\192.168.49.40\\\\测试部共享\\\\测试数据\\\\iCenter1.1\\\\maoyanData\\\\其它数据\\\\文档\\\\gvml.config.json\"},\n ]\n uploadFile = UploadFile(\"192.168.4.46\", \"8310\", \"test\", \"icenter\")\n uploadFile.upload(t,\"20200110070830\")\n # data = [\"http://192.168.4.37:8310\",\n # \"http://192.168.4.211:8310\"]\n # token = public_parm.get_token(\"http://192.168.4.37:8310\", \"baibo\", \"123456\")\n # wait(all_task, return_when=ALL_COMPLETED)\n # with ThreadPoolExecutor(max_workers=len(data)) as executor:\n # all_task = [executor.submit(openChrome, param) for param in data]\n # wait(all_task, return_when=ALL_COMPLETED)\n # threads = []\n # for url in data:\n # # 多线程\n # t1 = threading.Thread(target=openChrome, args=(url,))\n # threads.append(t1)\n # # 启动\n # for t2 in threads:\n # t2.start()\n # t2.join() # 此处注释掉会同时运行。但同时运行可能会出现遮挡导致有问题哦。\n", "sub_path": "uplodFile2.py", "file_name": "uplodFile2.py", "file_ext": "py", 
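The upload routine above fills the native Windows "Open" file dialog by walking raw window handles. A trimmed sketch of that handle-walking, assuming a dialog titled 打开 ("Open") is currently on screen; it requires pywin32 and only runs on Windows.

```python
import win32con
import win32gui

def fill_open_dialog(file_path: str) -> None:
    # '#32770' is the window class of every standard Windows dialog.
    dialog = win32gui.FindWindow("#32770", "打开")
    # Walk down to the filename edit box: ComboBoxEx32 -> ComboBox -> Edit.
    combo_ex = win32gui.FindWindowEx(dialog, 0, "ComboBoxEx32", None)
    combo = win32gui.FindWindowEx(combo_ex, 0, "ComboBox", None)
    edit = win32gui.FindWindowEx(combo, 0, "Edit", None)
    open_btn = win32gui.FindWindowEx(dialog, 0, "Button", None)  # the confirm button
    # Type the path into the edit box, then "click" the button via WM_COMMAND.
    win32gui.SendMessage(edit, win32con.WM_SETTEXT, 0, file_path)
    win32gui.SendMessage(dialog, win32con.WM_COMMAND, 1, open_btn)
```

This is the standard workaround for native dialogs, which Selenium cannot drive because they live outside the browser's DOM.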
"file_size_in_byte": 7932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 12, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 12, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 15, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 57, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 62, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 74, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 86, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 87, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 92, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 94, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 99, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 110, "usage_type": "call"}, {"api_name": "win32gui.FindWindow", "line_number": 111, "usage_type": "call"}, {"api_name": "win32gui.FindWindowEx", "line_number": 112, "usage_type": "call"}, {"api_name": "win32gui.FindWindowEx", "line_number": 113, "usage_type": "call"}, {"api_name": "win32gui.FindWindowEx", "line_number": 114, "usage_type": "call"}, {"api_name": "win32gui.FindWindowEx", "line_number": 116, "usage_type": "call"}, {"api_name": "win32gui.SendMessage", "line_number": 117, "usage_type": "call"}, {"api_name": "win32con.WM_SETTEXT", "line_number": 117, "usage_type": "attribute"}, {"api_name": "win32gui.SendMessage", "line_number": 118, "usage_type": "call"}, {"api_name": "win32con.WM_COMMAND", "line_number": 118, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 119, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 124, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 126, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 142, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 142, "usage_type": "name"}]} +{"seq_id": "224759584", "text": "\n# encoding = 'utf-8'\nfrom cmath import nan\n\nfrom numpy import NaN\nfrom opendatatools.common import RestAgent\nimport pandas as pd\nfrom bs4 import BeautifulSoup as bs\nimport json\nimport os\n\nclass LabelStudioAgent(RestAgent):\n\n def __init__(self, base_url):\n RestAgent.__init__(self)\n self.base_url = base_url.strip(\"/\")\n self.page_size = 30 # labelme默认30,和浏览器正常访问保持一致,不容易被筛选出来\n self.token = \"\"\n\n def login(self, username, password):\n url = self.base_url + \"/label_studio/user/login/\"\n\n res = self.session.get(url)\n token = bs(res.text, 'html.parser').select(\"#login-form > input[type=hidden]\")[0]['value']\n\n param = {\n 'csrfmiddlewaretoken': token,\n 'email' : username,\n 'password' : password,\n }\n print(param)\n res = self.session.post(url, data=param)\n if res.status_code == 200:\n soup = bs(res.text, 'html.parser')\n if soup.select(\"#main-content\"):\n return True, \"登录成功\"\n elif soup.select(\"#login-form\"):\n return False, \"登录失败\"\n return 
False, \"未知失败\"\n \n def list_projects(self):\n # TODO:目前只获取第一页,未来可以根据分页进行循环获取,加page/page_size主要还是模仿浏览器访问\n url = self.base_url + \"/label_studio/api/projects?page=%d&page_size=%d\" % (1, self.page_size)\n res = self.session.get(url)\n if res.status_code != 200:\n print(\"获取项目失败:\" + res.text)\n return None\n else:\n return pd.DataFrame(res.json()['results'])\n\n def list_views(self, project_id):\n url = self.base_url + \"/label_studio/api/dm/views?project=%d\" % project_id\n res = self.session.get(url)\n if res.status_code != 200:\n print(\"获取视图失败:\" + res.text)\n return None\n else:\n return pd.DataFrame(res.json())\n \n def list_images(self, project_id, view_id, page_end=0):\n # 加page/page_size主要还是模仿浏览器访问\n url = self.base_url + \"/label_studio/api/dm/tasks?page=%d&page_size=%d&view=%d&project=%d\" % (1, self.page_size, view_id, project_id)\n res = self.session.get(url)\n if res.status_code != 200:\n print(\"获取图像失败:\" + res.text)\n return None\n \n data = res.json()\n result = pd.DataFrame(data['tasks'])\n print(\"获取page[1]图像列表成功\")\n\n # 没有指定page_end,则加载所有图片元信息\n if page_end == 0:\n page_end = int( (data['total'] + 30) / 30 )\n\n # 从第2页开始下载,interaction=scroll也是为了模仿浏览器访问\n for page in range(2, page_end+1):\n url = self.base_url + \"/label_studio/api/dm/tasks?page=%d&page_size=%d&view=%d&interaction=scroll&project=%d\" % (page, self.page_size, view_id, project_id)\n res = self.session.get(url)\n data = res.json()\n result = pd.concat([pd.DataFrame(data['tasks']), result])\n print(\"获取page[%d]图像列表成功\" % page)\n return result\n \n def download_images(self, result, download_dir):\n result['download'] = 0\n dcol = list(result.columns).index('download')\n\n for i in range(len(result)):\n img_url = result.iloc[i]['data']['image']\n # TODO:加入随机间隔,模拟人下载\n f, s = self._download_file(img_url, download_dir)\n result.iloc[i, dcol] = 1\n\n return result\n\n def _download_file(self, url, destfolder):\n filename=url.replace(self.base_url, '').strip('/')\n fileuri = os.path.join(destfolder, filename)\n\n print(\"downloading %s to %s\" % (url, fileuri))\n if os.path.exists(fileuri):\n return fileuri, False\n\n filedir = os.path.dirname(fileuri)\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n\n with self.session.get(url, stream=True) as r:\n r.raise_for_status()\n with open(fileuri, 'wb') as f:\n for chunk in r.iter_content(chunk_size=8192):\n if chunk:\n f.write(chunk)\n return fileuri, True", "sub_path": "opendatatools/labelstudio/labelstudio_agent.py", "file_name": "labelstudio_agent.py", "file_ext": "py", "file_size_in_byte": 4380, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "opendatatools.common.RestAgent", "line_number": 12, "usage_type": "name"}, {"api_name": "opendatatools.common.RestAgent.__init__", "line_number": 15, "usage_type": "call"}, {"api_name": "opendatatools.common.RestAgent", "line_number": 15, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "435646724", "text": "import chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport config\n\nclass SqueezeNet10(chainer.Chain):\n\n \"\"\"SqueezeNet v1.0\"\"\"\n\n insize = 227\n finetuned_model_path = './models/squeezenet_v1.0.caffemodel'\n mean_value = (104, 117, 123)\n\n def call_fire(self, x, name):\n s1 = F.relu(self[name + '/squeeze1x1'](x))\n e1 = self[name + '/expand1x1'](s1)\n e3 = self[name + '/expand3x3'](s1)\n y = F.relu(F.concat((e1, e3), axis=1))\n return y\n\n def add_fire(self, name, in_channels, s1, e1, e3):\n super(SqueezeNet10, self).add_link(name + '/squeeze1x1', L.Convolution2D(in_channels, s1, 1))\n super(SqueezeNet10, self).add_link(name + '/expand1x1', L.Convolution2D(s1, e1, 1))\n super(SqueezeNet10, self).add_link(name + '/expand3x3', L.Convolution2D(s1, e3, 3, pad=1))\n\n def __init__(self, labelsize=config.labelsize):\n self.labelsize = labelsize\n super(SqueezeNet10, self).__init__()\n super(SqueezeNet10, self).add_link('conv1', L.Convolution2D(3, 96, 7, stride=2))\n self.add_fire('fire2', 96, 16, 64, 64)\n self.add_fire('fire3', 128, 16, 64, 64)\n self.add_fire('fire4', 128, 32, 128, 128)\n self.add_fire('fire5', 256, 32, 128, 128)\n self.add_fire('fire6', 256, 48, 192, 192)\n self.add_fire('fire7', 384, 48, 192, 192)\n self.add_fire('fire8', 384, 64, 256, 256)\n self.add_fire('fire9', 512, 64, 256, 256)\n super(SqueezeNet10, self).add_link('conv10', L.Convolution2D(\n 512, self.labelsize, 1, pad=1,\n initialW=np.random.normal(0, 0.01, (self.labelsize, 512, 1, 1))))\n self.train = True\n\n def __call__(self, x, t):\n h = F.relu(self.conv1(x))\n h = F.max_pooling_2d(h, 3, stride=2)\n\n h = self.call_fire(h, 'fire2')\n h = self.call_fire(h, 'fire3')\n h = self.call_fire(h, 'fire4')\n\n h = F.max_pooling_2d(h, 3, stride=2)\n\n h = self.call_fire(h, 'fire5')\n h = self.call_fire(h, 'fire6')\n h = self.call_fire(h, 'fire7')\n h = self.call_fire(h, 'fire8')\n\n h = F.max_pooling_2d(h, 3, stride=2)\n\n h = self.call_fire(h, 'fire9')\n h = F.dropout(h, ratio=0.5, train=self.train)\n\n h = F.relu(self.conv10(h))\n h = F.reshape(F.average_pooling_2d(h, h.data.shape[2]), (x.data.shape[0], self.labelsize))\n\n loss = F.softmax_cross_entropy(h, t)\n accuracy = F.accuracy(h, t)\n return loss\n", "sub_path": "models/squeezenet10.py", "file_name": "squeezenet10.py", "file_ext": "py", "file_size_in_byte": 2492, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "chainer.Chain", "line_number": 6, "usage_type": "attribute"}, {"api_name": "chainer.functions.relu", "line_number": 15, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 15, "usage_type": "name"}, {"api_name": "chainer.functions.relu", "line_number": 18, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 18, "usage_type": "name"}, {"api_name": "chainer.functions.concat", "line_number": 18, 
"usage_type": "call"}, {"api_name": "chainer.links.Convolution2D", "line_number": 22, "usage_type": "call"}, {"api_name": "chainer.links", "line_number": 22, "usage_type": "name"}, {"api_name": "chainer.links.Convolution2D", "line_number": 23, "usage_type": "call"}, {"api_name": "chainer.links", "line_number": 23, "usage_type": "name"}, {"api_name": "chainer.links.Convolution2D", "line_number": 24, "usage_type": "call"}, {"api_name": "chainer.links", "line_number": 24, "usage_type": "name"}, {"api_name": "config.labelsize", "line_number": 26, "usage_type": "attribute"}, {"api_name": "chainer.links.Convolution2D", "line_number": 29, "usage_type": "call"}, {"api_name": "chainer.links", "line_number": 29, "usage_type": "name"}, {"api_name": "chainer.links.Convolution2D", "line_number": 38, "usage_type": "call"}, {"api_name": "chainer.links", "line_number": 38, "usage_type": "name"}, {"api_name": "chainer.functions.relu", "line_number": 44, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 44, "usage_type": "name"}, {"api_name": "chainer.functions.max_pooling_2d", "line_number": 45, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 45, "usage_type": "name"}, {"api_name": "chainer.functions.max_pooling_2d", "line_number": 51, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 51, "usage_type": "name"}, {"api_name": "chainer.functions.max_pooling_2d", "line_number": 58, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 58, "usage_type": "name"}, {"api_name": "chainer.functions.dropout", "line_number": 61, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 61, "usage_type": "name"}, {"api_name": "chainer.functions.relu", "line_number": 63, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 63, "usage_type": "name"}, {"api_name": "chainer.functions.reshape", "line_number": 64, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 64, "usage_type": "name"}, {"api_name": "chainer.functions.average_pooling_2d", "line_number": 64, "usage_type": "call"}, {"api_name": "chainer.functions.softmax_cross_entropy", "line_number": 66, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 66, "usage_type": "name"}, {"api_name": "chainer.functions.accuracy", "line_number": 67, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "475510370", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 15 13:46:25 2017\n\n@author: apelosse\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random as rd\nfrom pylab import *\n\n\n# courbe initiale\n\ndef distribution_initiale_xenon(N,L):\n \"\"\"\n Retourne la position des N particules présentes à l'état initial dans la matrice de taille NxN\n La distribution des position est aléatoire.\n Deux atomes peuvent être au même endroit.\n \n Exemple :\n \n >>> distribution_initiale_Xenon(2,10)\n [array([0, 5]), array([6, 2])]\n\n \"\"\"\n n=[0]*N\n for i in range(N):\n n[i]=[randint(0,L-1),0,1] #modification de l'abscisse par rapport au code précédent\n return n\n \n\n\ndef affichage_initial(Xe):\n m_instant=np.zeros((L,L))\n \n for i in range(N):\n p=Xe[i]\n m_instant[p[0]][p[1]] =1\n \n plt.imshow(m_instant,cmap='Spectral')\n plt.show()\n\ndef desintegration(tau,k):\n '''fonction modélisant la desintegration nucleaire à la kème étape. 
Retourne False\n si la particule n'est pas désintegrer, retourne True sinon'''\n p = random()\n return(p>np.exp(-np.log(2)*k/tau)) #modélisation de la loi de decroissance nucleaire\n\n'''Code animation diffusion'''\n\n# Paramètre\nL=10 #Taile de la matrice \nN=3 #Nombres de particules initialement \nNbre_Etapes=5 #Nombres d'instants dans l'expérience\ntau1 = 5*np.log(2)\ntau2 = 10*np.log(2)\n\n#initialisation\nion() # mode interaction on\nimage=plt.figure()\nXe=distribution_initiale_xenon(N,L)\nnb_particules=len(Xe)\nm_instant=np.zeros((L,L)) #Matrice qui va indiquer la postion des particules \n\n \nfor t in range(Nbre_Etapes):\n p=rd.randint(0,nb_particules-1)\n (x,y,k)=Xe[p]\n if k!= None :\n if desintegration(tau1,k) : #desintegration en Cesium\n Xe[p].remove\n test = True\n nb_particules += -1\n elif desintegration(tau2,k) : #desintegration en Xenon stable\n Xe[p][2] = None\n if not test :\n (a,b)=(rd.random(),rd.random())\n if a<1./3 : #Probabilité 1/3 de reculer de 1 selon x\n if x>0.: #Vérification que la particule peut reculer, dans le cas contraire, elle ne bouge pas\n x-=1 \n elif a>2./3 : #Probabilité 1/3 d'avancer de 1 selon x\n if x0.:\n y-=1\n elif b>2./3 :\n if y int:\n if not heights or len(heights) < 2:\n return 0\n\n res = 0\n left, right = 0, len(heights) - 1\n while left < right:\n width = right - left\n height = min(heights[left], heights[right])\n area = width * height\n if area > res:\n res = area\n if height == heights[left]:\n left += 1\n else:\n right -= 1\n return res\n", "sub_path": "0011_Container_With_Most_Water.py", "file_name": "0011_Container_With_Most_Water.py", "file_ext": "py", "file_size_in_byte": 753, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "typing.List", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "650146724", "text": "# -*- coding: utf-8 -*-\n# @Author: gunjianpan\n# @Date: 2019-02-09 11:10:52\n# @Last Modified by: gunjianpan\n# @Last Modified time: 2019-03-28 21:09:01\n\nimport argparse\nimport codecs\nimport datetime\nimport re\nimport os\n\nfrom bs4 import BeautifulSoup\nfrom proxy.getproxy import GetFreeProxy\nfrom util.db import Db\nfrom util.util import begin_time, end_time, changeCookie, basic_req, can_retry, changeHtmlTimeout\n\n\"\"\"\n * blog @http\n * www.zhihu.com/api/v4/creator/content_statistics\n * www.jianshu.com/u/\n * blog.csdn.net\n .data/\n ├── cookie // zhihu cookie\n ├── google // google analysis data\n ├── slug // blog title slug\n └── title // blog title list\n\"\"\"\nproxy_req = GetFreeProxy().proxy_req\ndata_dir = 'blog/data/'\n\n\nclass TitleViews(object):\n \"\"\"\n update title views\n \"\"\"\n\n def __init__(self):\n self.Db = Db(\"blog\")\n self.local_views = {}\n self.title_map = {}\n self.title2slug = {}\n self.failured_map = {}\n self.zhihu_views = {}\n self.zhihu_id = {}\n self.jianshu_views = {}\n self.jianshu_id = {}\n self.csdn_views = {}\n self.csdn_id = {}\n self.exist_data = {}\n self.getTitleMap()\n self.insert_sql = '''INSERT INTO title_views(`title_name`, `local_views`, `zhihu_views`, `csdn_views`, `jianshu_views`, `zhihu_id`, `csdn_id`, `jianshu_id`) VALUES %s'''\n self.update_sql = '''REPLACE INTO title_views(`id`, `title_name`, `local_views`, `zhihu_views`, `csdn_views`, `jianshu_views`, `zhihu_id`, `csdn_id`, `jianshu_id`, `created_at`) VALUES %s'''\n self.new_day_sql = '''INSERT INTO page_views(`date`, `existed_views`, `existed_spider`) VALUES %s'''\n\n def loadLocalView(self):\n \"\"\"\n load local view\n 
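The Container-With-Most-Water record embedded above uses the classic two-pointer argument: narrow from both ends and always advance the shorter wall, since keeping it can never beat the current area. A quick standalone check against the usual LeetCode example (reimplemented here as a free function so the snippet runs on its own):

```python
from typing import List

def max_area(heights: List[int]) -> int:
    left, right, best = 0, len(heights) - 1, 0
    while left < right:
        best = max(best, (right - left) * min(heights[left], heights[right]))
        # Move the shorter wall inward: the width shrinks either way,
        # so only raising the limiting height can improve the area.
        if heights[left] <= heights[right]:
            left += 1
        else:
            right -= 1
    return best

print(max_area([1, 8, 6, 2, 5, 4, 8, 3, 7]))  # -> 49: walls at indices 1 and 8, min(8, 7) * 7
```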
\"\"\"\n if not os.path.exists(\"%sgoogle\" % data_dir):\n return\n with codecs.open(\"%sgoogle\" % data_dir, 'r', encoding='utf-8') as f:\n test = f.readlines()\n test = test[7:]\n for index in test:\n arr = index.split(',')\n slug = self.matchSlug(arr[0])\n if slug is None or slug not in self.title_map:\n continue\n print(slug + ' ' + str(arr[1]) + ' ' + arr[0])\n if slug in self.local_views:\n self.local_views[slug] += int(arr[1])\n else:\n self.local_views[slug] = int(arr[1])\n\n def getTitleMap(self):\n \"\"\"\n get title map\n \"\"\"\n if os.path.exists('%sslug' % data_dir):\n with codecs.open('%sslug' % data_dir, 'r', encoding='utf-8') as f:\n slug = f.readlines()\n else:\n slug = []\n if os.path.exists('%stitle' % data_dir):\n with codecs.open('%stitle' % data_dir, 'r', encoding='utf-8') as f:\n title = f.readlines()\n else:\n title = []\n self.title_map = {tempslug.split(\n '\"')[1]: title[num].split('\"')[1] for num, tempslug in enumerate(slug)}\n title2slug = {\n self.title_map[index]: index for index in self.title_map.keys()}\n noemoji_title = {self.filter_emoji(\n self.title_map[index]).replace('\\u200d', ''): index for index in self.title_map.keys()}\n self.title2slug = {**noemoji_title, **title2slug}\n\n def matchSlug(self, pattern):\n \"\"\"\n match slug\n \"\"\"\n arr = re.search(r'\\/([^\\/]+).html', pattern)\n return None if arr is None else arr.group(1)\n\n def getZhihuView(self):\n if os.path.exists('%scookie' % data_dir):\n with codecs.open('%scookie' % data_dir, 'r', encoding='utf-8') as f:\n cookie = f.readline()\n else:\n cookie = ' '\n changeCookie(cookie[:-1])\n url_basic = [\n 'https://www.zhihu.com/api/v4/creator/content_statistics/',\n 'articles?order_field=object_created&order_sort=descend&begin_date=2018-09-01&end_date=',\n datetime.datetime.now().strftime(\"%Y-%m-%d\"),\n '&page_no='\n ]\n url = \"\".join(url_basic)\n json = self.get_request(url + '1', 1)\n if not json:\n return\n if not 'data' in json:\n if 'code' in json:\n print(json)\n return\n for index in json['data']:\n zhihu_title = index['title']\n zhihu_id = int(index['url_token'])\n zhihu_count = int(index['read_count'])\n\n if zhihu_title in self.title2slug:\n temp_slug = self.title2slug[zhihu_title]\n self.zhihu_id[temp_slug] = zhihu_id\n self.zhihu_views[temp_slug] = zhihu_count\n elif zhihu_id in self.zhihu_id_map:\n temp_slug = self.zhihu_id_map[zhihu_id]\n self.zhihu_id[temp_slug] = zhihu_id\n self.zhihu_views[temp_slug] = zhihu_count\n else:\n print(index['title'])\n\n for index in range(json['count'] // 10):\n print('zhihu', index)\n json = self.get_request(url + str(index + 2), 1)\n if not json:\n continue\n for index in json['data']:\n zhihu_title = index['title']\n zhihu_id = int(index['url_token'])\n zhihu_count = int(index['read_count'])\n\n if zhihu_title in self.title2slug:\n temp_slug = self.title2slug[zhihu_title]\n self.zhihu_id[temp_slug] = zhihu_id\n self.zhihu_views[temp_slug] = zhihu_count\n elif zhihu_id in self.zhihu_id_map:\n temp_slug = self.zhihu_id_map[zhihu_id]\n self.zhihu_id[temp_slug] = zhihu_id\n self.zhihu_views[temp_slug] = zhihu_count\n else:\n print(index['title'])\n\n def get_request(self, url, types):\n\n result = basic_req(url, 1)\n\n if not result:\n if can_retry(url):\n self.get_request(url, types)\n return\n return result\n\n def get_request_v2(self, url, types, header):\n\n result = proxy_req(url, 0, header=header)\n\n if not result or not len(result.find_all('div', class_='content')):\n if can_retry(url):\n self.get_request_v2(url, types, header)\n 
return\n return result\n\n def get_request_v3(self, url, types):\n\n result = basic_req(url, 0)\n\n if result is None or not result or not len(result.find_all('p', class_='content')):\n if can_retry(url):\n self.get_request_v3(url, types)\n return\n return result\n\n def getJianshuViews(self):\n \"\"\"\n get jianshu views\n \"\"\"\n header = {\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'zh-CN,zh;q=0.9',\n 'cache-control': 'no-cache',\n 'pragma': 'no-cache',\n 'sec-ch-ua': 'Google Chrome 75',\n 'sec-fetch-dest': 'document',\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-site': 'cross-site',\n 'sec-fetch-user': '?F',\n 'sec-origin-policy': '0',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3736.0 Safari/537.36'\n }\n\n basic_url = 'https://www.jianshu.com/u/2e0f69e4a4f0'\n\n for rounds in range(1, 4):\n url = basic_url if rounds == 1 else basic_url + \\\n '?order_by=shared_at&page=' + str(rounds)\n print(url)\n html = self.get_request_v2(url, 0, header)\n if html is None:\n print('None')\n return\n for index in html.find_all('li', class_=[\"\", 'have-img']):\n if len(index.find_all('i')) < 3:\n continue\n title = index.find_all('a', class_='title')[\n 0].text.replace('`', '')\n jianshu_id = int(index['data-note-id'])\n jianshu_count = int(index.find_all('a')[-2].text)\n if title in self.title2slug:\n temp_slug = self.title2slug[title]\n self.jianshu_id[temp_slug] = jianshu_id\n self.jianshu_views[temp_slug] = jianshu_count\n elif jianshu_id in self.jianshu_id_map:\n temp_slug = self.jianshu_id_map[jianshu_id]\n self.jianshu_id[temp_slug] = jianshu_id\n self.jianshu_views[temp_slug] = jianshu_count\n else:\n print(title)\n\n def getCsdnViews(self):\n \"\"\"\n get csdn views\n \"\"\"\n\n basic_url = \"https://blog.csdn.net/iofu728\"\n\n for index in range(1, 3):\n url = basic_url if index == 1 else basic_url + \\\n '/article/list/' + str(index) + '?'\n\n html = self.get_request_v3(url, 0)\n if html is None:\n print('None')\n return\n for div_lists in html.find_all('div', class_='article-item-box csdn-tracking-statistics'):\n if 'style' in div_lists.attrs:\n continue\n csdn_id = int(div_lists['data-articleid'])\n title = div_lists.a.contents[2].replace(\n '\\n', '').strip().replace('`', '')\n csdn_count = int(div_lists.find_all(\n 'span', class_='read-num')[0].span.text)\n if title in self.title2slug:\n temp_slug = self.title2slug[title]\n self.csdn_id[temp_slug] = csdn_id\n self.csdn_views[temp_slug] = csdn_count\n elif csdn_id in self.csdn_id_map:\n temp_slug = self.csdn_id_map[csdn_id]\n self.csdn_id[temp_slug] = csdn_id\n self.csdn_views[temp_slug] = csdn_count\n else:\n print(title)\n\n def filter_emoji(self, desstr, restr=''):\n '''\n filter emoji\n '''\n desstr = str(desstr)\n try:\n co = re.compile(u'[\\U00010000-\\U0010ffff]')\n except re.error:\n co = re.compile(u'[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]')\n return co.sub(restr, desstr)\n\n def init_db(self):\n self.loadLocalView()\n self.getZhihuView()\n self.getJianshuViews()\n self.getCsdnViews()\n insert_list = []\n for index in self.title_map.keys():\n insert_list.append((index, self.local_views[index] if index in self.local_views else 0, self.zhihu_views[index] if index in self.zhihu_views else 0, self.csdn_views[index] if index in self.csdn_views else 0, 
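The scraper's `filter_emoji` helper above strips astral-plane characters with a wide-Unicode regex, falling back to a surrogate-pair pattern on narrow Python builds where `\U` ranges raise `re.error`. A standalone sketch of the same idea:

```python
import re

try:
    # Wide builds: match any astral-plane code point directly.
    EMOJI_RE = re.compile(u"[\U00010000-\U0010ffff]")
except re.error:
    # Narrow builds store astral characters as surrogate pairs.
    EMOJI_RE = re.compile(u"[\uD800-\uDBFF][\uDC00-\uDFFF]")

def filter_emoji(text: str, replacement: str = "") -> str:
    return EMOJI_RE.sub(replacement, str(text))

print(filter_emoji("blog stats \U0001F600"))  # -> "blog stats "
```

On Python 3 the first branch always compiles; the fallback mattered mainly for narrow Python 2 builds, which is presumably why the record keeps both.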
self.jianshu_views[index]\n if index in self.jianshu_views else 0, self.zhihu_id[index] if index in self.zhihu_id else 0, self.csdn_id[index] if index in self.csdn_id else 0, self.jianshu_id[index] if index in self.jianshu_id else 0))\n # return insert_list\n results = self.Db.insert_db(self.insert_sql % str(insert_list)[1:-1])\n if results:\n if len(insert_list):\n print('Insert ' + str(len(insert_list)) + ' Success!')\n else:\n pass\n\n def select_all(self):\n result = self.Db.select_db(\n \"SELECT `id`, `title_name`, `local_views`, `zhihu_views`, `csdn_views`, `jianshu_views`, `zhihu_id`, `csdn_id`, `jianshu_id`, `created_at` from title_views where `is_deleted`=0\")\n if result == False:\n print(\"SELECT Error!\")\n else:\n self.exist_data = {index[1]: list(index) for index in result}\n self.zhihu_id_map = {index[6]: index[1]\n for index in result if index[6]}\n self.csdn_id_map = {index[7]: index[1]\n for index in result if index[7]}\n self.jianshu_id_map = {index[8]: index[1]\n for index in result if index[8]}\n for index in self.exist_data:\n self.exist_data[index][-1] = self.exist_data[index][-1].strftime(\n '%Y-%m-%d %H:%M:%S')\n\n def update_view(self):\n changeHtmlTimeout(10)\n wait_map = {}\n self.select_all()\n self.getZhihuView()\n self.getJianshuViews()\n self.getCsdnViews()\n for index in self.zhihu_views.keys():\n if self.zhihu_views[index] == self.exist_data[index][3] and self.zhihu_id[index] == self.exist_data[index][6]:\n continue\n wait_map[index] = self.exist_data[index]\n wait_map[index][3] = self.zhihu_views[index]\n wait_map[index][6] = self.zhihu_id[index]\n for index in self.csdn_views.keys():\n if self.csdn_views[index] == self.exist_data[index][4] and self.csdn_id[index] == self.exist_data[index][7]:\n continue\n if index not in wait_map:\n wait_map[index] = self.exist_data[index]\n wait_map[index][4] = self.csdn_views[index]\n wait_map[index][7] = self.csdn_id[index]\n for index in self.jianshu_views.keys():\n if self.jianshu_views[index] == self.exist_data[index][5] and self.jianshu_id[index] == self.exist_data[index][8]:\n continue\n wait_map[index] = self.exist_data[index]\n wait_map[index][5] = self.jianshu_views[index]\n wait_map[index][8] = self.jianshu_id[index]\n update_list = [tuple(index) for index in wait_map.values()]\n # return update_list:q\n if not len(update_list):\n return\n results = self.Db.update_db(self.update_sql % str(update_list)[1:-1])\n if results:\n if len(update_list):\n print('Update ' + str(len(update_list)) + ' Success!')\n else:\n pass\n\n def new_day(self):\n day_data = self.Db.select_db(\n \"SELECT `today_views`, `existed_views` from page_views order by `id` desc limit 1\")\n if not os.path.exists('../blog/log/basic'):\n print('File not exist!!!')\n return\n with codecs.open(\"../blog/log/basic\", 'r', encoding='utf-8') as f:\n existed_spider = int(f.readlines()[1])\n today_date = datetime.datetime.now().strftime('%Y-%m-%d')\n new_day_list = [(today_date, day_data[0][0] +\n day_data[0][1], existed_spider)]\n results = self.Db.insert_db(self.new_day_sql % str(new_day_list)[1:-1])\n if results:\n if len(new_day_list):\n print('New day update' + str(len(new_day_list)) + ' Success!')\n else:\n pass\n\n\nif __name__ == '__main__':\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n parser = argparse.ArgumentParser(description='gunjianpan blog backup code')\n parser.add_argument('--model', type=int, default=1, metavar='N',\n help='model update or new day')\n model = parser.parse_args().model\n bb = TitleViews()\n if model == 
1:\n bb.update_view()\n else:\n bb.new_day()\n bb.update_view()\n", "sub_path": "blog/titleviews.py", "file_name": "titleviews.py", "file_ext": "py", "file_size_in_byte": 15374, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "proxy.getproxy.GetFreeProxy", "line_number": 29, "usage_type": "call"}, {"api_name": "util.db.Db", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 86, "usage_type": "call"}, {"api_name": "re.search", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 107, "usage_type": "call"}, {"api_name": "util.util.changeCookie", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 115, "usage_type": "attribute"}, {"api_name": "util.util.basic_req", "line_number": 165, "usage_type": "call"}, {"api_name": "util.util.can_retry", "line_number": 168, "usage_type": "call"}, {"api_name": "util.util.can_retry", "line_number": 178, "usage_type": "call"}, {"api_name": "util.util.basic_req", "line_number": 185, "usage_type": "call"}, {"api_name": "util.util.can_retry", "line_number": 188, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 281, "usage_type": "call"}, {"api_name": "re.error", "line_number": 282, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 283, "usage_type": "call"}, {"api_name": "util.util.changeHtmlTimeout", "line_number": 321, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 360, "usage_type": "call"}, {"api_name": "os.path", "line_number": 360, "usage_type": "attribute"}, {"api_name": "codecs.open", "line_number": 363, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 365, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 365, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 377, "usage_type": "call"}, {"api_name": "os.path", "line_number": 377, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 378, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 379, "usage_type": "call"}]} +{"seq_id": "243695577", "text": "from django.core.management.base import BaseCommand, CommandError\nfrom symposion.schedule.models import Presentation\n\nclass Command(BaseCommand):\n args = 'presentation_id'\n help = 'updates the specified presentation from its base proposal'\n\n def handle(self, *args, **options):\n if len(args) != 1:\n raise CommandError(\"provide a single presentation id\")\n pk = int(args[0])\n try:\n presentation = Presentation.objects.get(pk=pk)\n except Presentation.DoesNotExist:\n raise CommandError('Presentation \"%s\" does not 
exist' % pk)\n\n presentation.update_from_proposal()\n\n self.stdout.write('Successfully update presentation \"%s\"' % pk)\n\n", "sub_path": "symposion/schedule/management/commands/update_presentation.py", "file_name": "update_presentation.py", "file_ext": "py", "file_size_in_byte": 712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 4, "usage_type": "name"}, {"api_name": "django.core.management.base.CommandError", "line_number": 10, "usage_type": "call"}, {"api_name": "symposion.schedule.models.Presentation.objects.get", "line_number": 13, "usage_type": "call"}, {"api_name": "symposion.schedule.models.Presentation.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "symposion.schedule.models.Presentation", "line_number": 13, "usage_type": "name"}, {"api_name": "symposion.schedule.models.Presentation.DoesNotExist", "line_number": 14, "usage_type": "attribute"}, {"api_name": "symposion.schedule.models.Presentation", "line_number": 14, "usage_type": "name"}, {"api_name": "django.core.management.base.CommandError", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "596836967", "text": "from visdom import Visdom\nimport argparse\nimport numpy as np\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--file')\n\nargs = parser.parse_args()\n\nviz = Visdom()\niter = 1\nloss_mask = [0]\nval_loss_mask = [0]\n\nprint(args.file)\nskip_iters = 100\n\nwith open(args.file, 'r') as f:\n for line in f:\n if \"mrcnn_class_loss\" in line:\n\n iter = iter + 1\n\n terms = line.split(\"-\")\n mask_loss_term = terms[7]\n mask_loss = mask_loss_term.split(\":\")[1]\n\n loss_mask.append(float(mask_loss))\n\n if \"val_mrcnn_mask_loss\" in line:\n\n terms = line.split(\"-\")\n val_mask_loss_term = terms[13]\n val_mask_loss = val_mask_loss_term.split(\":\")[1]\n\n val_loss_mask.append(float(val_mask_loss))\n else:\n val_loss_mask.append(val_loss_mask[-1])\n\n\nloss_mask_arr = np.array(loss_mask)\nval_loss_mask_arr = np.array(val_loss_mask)\n\nviz.line(X=np.column_stack((np.arange(0, iter), np.arange(0, iter))),\n Y=np.column_stack((loss_mask_arr, val_loss_mask_arr)))", "sub_path": "scripts/log_vis.py", "file_name": "log_vis.py", "file_ext": "py", "file_size_in_byte": 1097, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 5, "usage_type": "call"}, {"api_name": "visdom.Visdom", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "227907399", "text": "from PyWeChatSpy import WeChatSpy\nimport requests\n\n\ndef get_reply(data):\n url = f\"http://api.douqq.com/?key=&msg={data}\" # key获取地址http://xiao.douqq.com/\n resp = requests.get(url)\n return resp.text\n\n\ndef parser(data):\n if data[\"type\"] == 1:\n # 登录信息\n print(data)\n elif data[\"type\"] == 203:\n # 微信登出\n print(\"微信退出登录\")\n elif data[\"type\"] == 5:\n # 消息\n for item in data[\"data\"]:\n print(item)\n wxid1, wxid2 = item[\"wxid1\"], item.get(\"wxid2\")\n if wxid1.endswith(\"chatroom\"):\n # 查询群信息\n 
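The visdom log-plotting record above draws training and validation loss as two traces in one window by column-stacking both series into a single X and a single Y array. A minimal sketch of that call shape — it assumes a visdom server is already running locally (`python -m visdom.server`), and the curves are synthetic.

```python
import numpy as np
from visdom import Visdom

viz = Visdom()  # assumes a local visdom server is up

steps = np.arange(0, 50)
train_loss = np.exp(-steps / 20.0)   # synthetic decaying curve
val_loss = train_loss + 0.05         # synthetic offset curve

# Column-stacking gives shape (N, 2): one column per trace, drawn as two lines.
viz.line(X=np.column_stack((steps, steps)),
         Y=np.column_stack((train_loss, val_loss)))
```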
spy.query_contact_details(wxid1)\n if wxid2:\n # 查询寻群内发言人信息\n spy.query_contact_details(wxid2, wxid1)\n else:\n # 查询普通联系人信息\n spy.query_contact_details(wxid1)\n elif data[\"type\"] == 2:\n # 联系人信息\n print(data)\n\n\nif __name__ == '__main__':\n spy = WeChatSpy(parser=parser)\n spy.run()\n\n", "sub_path": "example.py", "file_name": "example.py", "file_ext": "py", "file_size_in_byte": 1087, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "PyWeChatSpy.WeChatSpy", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "300342791", "text": "from sqlalchemy import Table, MetaData, Column, ForeignKey, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.dialects.postgresql import ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, \\\nDOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER, \\\nINTERVAL, MACADDR, NUMERIC, REAL, SMALLINT, TEXT, TIME, \\\nTIMESTAMP, UUID, VARCHAR\nimport re, string\nfrom itertools import permutations\n\n\nBase = declarative_base()\n\npaper_author = Table('paperauthor', Base.metadata,\n\tColumn('paperid', Integer, ForeignKey('paper.id')),\n\tColumn('authorid', Integer, ForeignKey('author.id'))\n)\n\nclass Authors(Base):\n\t__tablename__ = 'author'\n\n\tid = Column(INTEGER, primary_key=True)\n\tname = Column(VARCHAR)\n\taffiliation = Column(VARCHAR)\n\tinitial = Column(VARCHAR)\n\tinitial_hash = Column(INTEGER)\n\n\tpapers = relationship('Papers', secondary=paper_author, backref='authors')\n\n\tdef __repr__(self):\n\t\treturn u''.join(''%self.name).encode('utf-8')\n\n\t# Change to full last name + first initial (+ middle initial if exists)\n\tdef getInitials(self):\n\t\tnameElements = self.name.strip().split(' ')\n\t\tpattern = re.compile('[\\W_]+')\n\t\tres = []\n\t\tres.append(pattern.sub('', nameElements[0][0]) if len(nameElements[0]) else '')\n\t\tres.append(pattern.sub('', nameElements[-1][0]) if len(nameElements[-1]) else '')\n\t\treturn res\n\n\tdef getInitialsHash(self):\n\t\tpossibilities = list(permutations(''.join(self.initial)))\n\t\tsumHash = 0\n\t\tfor i in range(len(possibilities)):\n\t\t\tfor k in range(len(possibilities[i])):\n\t\t\t\tsumHash += (ord(possibilities[i][k].upper()) - ord('A')) ** (k + 1)\n\t\treturn sumHash\n\n\nclass Papers(Base):\n\t__tablename__ = 'paper'\n\t\n\tid = Column(INTEGER, primary_key=True)\n\ttitle = Column(VARCHAR)\n\tyear = Column(VARCHAR)\n\tconferenceid = Column(INTEGER)\n\tjournalid = Column(INTEGER)\n\tkeyword = Column(VARCHAR)\n\n\tdef __repr__(self):\n\t\treturn u''.join(''%self.id).encode('utf-8')\n", "sub_path": "models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1967, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 15, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 16, "usage_type": "argument"}, {"api_name": 
"sqlalchemy.ForeignKey", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.INTEGER", "line_number": 22, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.VARCHAR", "line_number": 23, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.VARCHAR", "line_number": 24, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.VARCHAR", "line_number": 25, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.INTEGER", "line_number": 26, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 28, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 54, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.INTEGER", "line_number": 54, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 55, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.VARCHAR", "line_number": 55, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.VARCHAR", "line_number": 56, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.INTEGER", "line_number": 57, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.INTEGER", "line_number": 58, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.VARCHAR", "line_number": 59, "usage_type": "argument"}]} +{"seq_id": "633572252", "text": "import util\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport open3d as o3d\r\nimport scipy.io as sio\r\n\r\ndensifyN = 50000\r\n\r\noutput_path = 'F:/Surface reconstruction pre'\r\nshape_file = 'F:/Surface reconstruction pre/airplane.obj'\r\nV,E,F_ = util.parseObj(shape_file)\r\nF = util.removeWeirdDuplicate(F_)\r\nVorig,Eorig,Forig = V.copy(),E.copy(),F.copy()\r\n\r\n# sort by length (maintain a priority queue)\r\nElist = list(range(len(E)))\r\nElist.sort(key=lambda i:util.edgeLength(V,E,i),reverse=True)\r\n\r\n# create edge-to-triangle and triangle-to-edge lists\r\nEtoF = [[] for j in range(len(E))]\r\nFtoE = [[] for j in range(len(F))]\r\nfor f in range(len(F)):\r\n\tv = F[f]\r\n\tutil.pushEtoFandFtoE(EtoF,FtoE,E,f,v[0],v[1])\r\n\tutil.pushEtoFandFtoE(EtoF,FtoE,E,f,v[0],v[2])\r\n\tutil.pushEtoFandFtoE(EtoF,FtoE,E,f,v[1],v[2])\r\nV,E,F = list(V),list(E),list(F)\r\n\r\n# repeat densification\r\nfor z in range(densifyN):\r\n\tutil.densify(V,E,F,EtoF,FtoE,Elist)\r\n \r\ndensifyV = np.array(V[-densifyN:])\r\n\r\nx=densifyV[:,0]\r\ny=densifyV[:,1]\r\nz=densifyV[:,2]\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111, projection='3d')\r\nax.scatter(z, x, y, zdir='z', c= 
'red')\r\nplt.savefig(\"demo.png\")\r\n\r\noutput_file= {\r\n\t\t\"V\": Vorig,\r\n\t\t\"E\": Eorig,\r\n\t\t\"F\": Forig,\r\n\t\t\"Vd\": densifyV\r\n\t}\r\n\r\nx=densifyV[:,0]\r\ny=densifyV[:,1]\r\nz=densifyV[:,2]\r\npoints = {\r\n \"x\":x,\r\n \"y\":y,\r\n \"z\":z\r\n }\r\nsio.savemat(\"airplane.mat\",{\r\n\t\t\"V\": Vorig,\r\n\t\t\"E\": Eorig,\r\n\t\t\"F\": Forig,\r\n\t\t\"Vd\": densifyV\r\n\t})\r\n", "sub_path": "densify_mat.py", "file_name": "densify_mat.py", "file_ext": "py", "file_size_in_byte": 1500, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "util.parseObj", "line_number": 12, "usage_type": "call"}, {"api_name": "util.removeWeirdDuplicate", "line_number": 13, "usage_type": "call"}, {"api_name": "util.edgeLength", "line_number": 18, "usage_type": "call"}, {"api_name": "util.pushEtoFandFtoE", "line_number": 25, "usage_type": "call"}, {"api_name": "util.pushEtoFandFtoE", "line_number": 26, "usage_type": "call"}, {"api_name": "util.pushEtoFandFtoE", "line_number": 27, "usage_type": "call"}, {"api_name": "util.densify", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "scipy.io.savemat", "line_number": 60, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "159415555", "text": "import discord\nfrom discord.ext import tasks, commands\n\n\nclass MyCog(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.index = 0\n self.printer.start()\n\n def cog_unload(self):\n self.printer.cancel()\n\n @tasks.loop(seconds=1.0)\n async def printer(self):\n print(self.index)\n self.index += 1\n if self.index == 10:\n channel = self.bot.get_channel(607205666754658324)\n await channel.send('test')\n self.index = 0\n\n @printer.before_loop\n async def before_printer(self):\n print('waiting...')\n await self.bot.wait_until_ready()\n\n\ndef setup(bot):\n bot.add_cog(MyCog(bot))\n", "sub_path": "cogs/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 691, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 5, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 5, "usage_type": "name"}, {"api_name": "discord.ext.tasks.loop", "line_number": 15, "usage_type": "call"}, {"api_name": "discord.ext.tasks", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "16742659", "text": "import pandas as pd\nimport numpy as np\nimport pickle\nfrom sklearn.utils import class_weight\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\n#import tensorflow.keras as keras\nimport plaidml.keras\nimport os\nplaidml.keras.install_backend()\nos.environ[\"KERAS_BACKEND\"] = \"plaidml.keras.backend\"\nimport keras\nfrom keras import Sequential\nfrom keras import regularizers\nfrom keras.layers import LSTM, Dense, Flatten, BatchNormalization, Dropout\nfrom keras.utils import to_categorical\n#import tensorflow as tf\nimport gc\n\n#%%\ndef splitSeconds(n, country, t, 
seconds, samplerate):\n length = seconds * samplerate\n data = pickle.load( open( \"Raw Track Data\\\\\" + country + \"_\" + t + \".p\", \"rb\" ) )\n tracks = data.track_id.unique()\n tracks = np.random.choice(tracks, size=n, replace=False)\n trackFeats = data[data.track_id.isin(tracks)]\n del data\n dur = trackFeats.iloc[:,1]\n long = trackFeats.loc[trackFeats.index.repeat(dur * samplerate)].reset_index(drop = True)\n del trackFeats\n long = long.sort_values(by = [\"track_id\", \"start\"])\n long['change'] = long.track_id.eq(long.track_id.shift())\n change = long[long.change == False].index\n long = long.iloc[:, 5:30]\n indices = np.concatenate((np.arange(0, long.shape[0], length), change))\n indices = np.sort(indices)\n indices = np.unique(indices)\n partition = np.split(np.array(long), indices)\n del long\n samples = []\n for i in partition:\n if i.shape[0] == length:\n samples = samples + [i]\n samples = np.stack(samples)\n gc.collect()\n return samples, np.repeat(np.array([country]), samples.shape[0])\n\n\n\n#%%\n\ndef getSamples(train_n, val_n, seconds, samplerate, countriesOfInterest,\n enc, verbose = 0):\n train_labels = pd.DataFrame()\n val_labels = pd.DataFrame()\n train_x = None\n train_labels = []\n val_x = None\n val_labels = []\n for country in countriesOfInterest:\n if verbose > 0:\n print(\"getting\",country)\n x1, y1 = splitSeconds(train_n, country, \"train\", seconds, samplerate)\n x2, y2 = splitSeconds(val_n, country, \"val\", seconds, samplerate)\n if train_x is None:\n train_x = x1\n train_labels = y1\n val_x = x2\n val_labels = y2\n else:\n train_x = np.append(train_x, x1, axis = 0)\n train_labels = np.append(train_labels, y1, axis = 0)\n del x1, y1\n val_x = np.append(val_x, x2, axis = 0)\n val_labels = np.append(val_labels, y2, axis = 0)\n del x2, y2\n gc.collect()\n # train_x = np.array(train_x)\n gc.collect()\n #y = np.dstack(train_x)\n #del train_x\n #train_x = np.rollaxis(y,-1)\n #del y\n # train_labels = np.array(train_labels)\n #val_x = np.array(val_x)\n gc.collect()\n #y = np.dstack(val_x)\n #del val_x\n #val_x = np.rollaxis(y,-1)\n #del y\n # val_labels = np.array(val_labels)\n class_weights = class_weight.compute_class_weight('balanced',\n countriesOfInterest,\n list(train_labels))\n train_labels = enc.transform(np.array(train_labels).reshape(-1,1)).toarray()\n val_labels = enc.transform(np.array(val_labels).reshape(-1,1)).toarray()\n return train_x, train_labels, val_x, val_labels, class_weights\n\n\ndef train(iterations, learn_rate, train_n, val_n, seconds, samplerate,\n countriesOfInterest, enc, epochs, model_dir,\n model):\n for i in range(iterations):\n adam = keras.optimizers.Adam(lr=learn_rate)\n model.compile(loss = \"categorical_crossentropy\", optimizer= adam, metrics=[\"acc\"])\n train_x, train_labels, val_x, val_labels, class_weights = getSamples(train_n, val_n, seconds, samplerate, countriesOfInterest, enc)\n print(np.sum(train_labels, axis = 0))\n model.fit(train_x, train_labels,\n epochs = i * epochs + epochs, \n initial_epoch = i * epochs,\n shuffle = True,\n validation_data = (val_x, val_labels),\n batch_size = 2048,\n class_weight = class_weights,\n # callbacks=[tensorboard_callback],\n verbose = 1)\n #model.save_weights(model_dir)\n #if i%2 == 0:\n # learn_rate = learn_rate/2\n if i % 1 == 0:\n preds = model.predict(val_x, batch_size = 2048, verbose = 1)\n # print(np.sum(train_labels, axis = 0))\n plt.imshow(\n confusion_matrix(\n enc.inverse_transform(preds), \n enc.inverse_transform(val_labels), \n # normalize = \"all\"\n )\n )\n 
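Among the records above, the discord.py cog drives a background job with discord.ext.tasks: the loop is started in `__init__`, cancelled in `cog_unload`, and gated on `wait_until_ready` before its first tick. A skeletal version of that lifecycle — token handling and the channel id are omitted as they depend on the deployment.

```python
from discord.ext import commands, tasks

class TickerCog(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.ticker.start()           # begin looping as soon as the cog loads

    def cog_unload(self):
        self.ticker.cancel()          # stop cleanly when the cog is removed

    @tasks.loop(seconds=1.0)
    async def ticker(self):
        print("tick")                 # periodic work goes here

    @ticker.before_loop
    async def before_ticker(self):
        await self.bot.wait_until_ready()  # don't tick before the gateway is up

def setup(bot):
    bot.add_cog(TickerCog(bot))
```

The before_loop gate matters: without it the first iterations can fire before the bot has a connection, so calls like `get_channel` return None.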
plt.pause(.5)\n plt.show()\n preds = model.predict(train_x, batch_size = 2048, verbose = 1)\n plt.imshow(\n confusion_matrix(\n enc.inverse_transform(preds), \n enc.inverse_transform(train_labels), \n # normalize = \"all\"\n )\n )\n plt.pause(.5)\n plt.show()", "sub_path": "plaidmodule.py", "file_name": "plaidmodule.py", "file_ext": "py", "file_size_in_byte": 5238, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "plaidml.keras.keras.install_backend", "line_number": 11, "usage_type": "call"}, {"api_name": "plaidml.keras.keras", "line_number": 11, "usage_type": "attribute"}, {"api_name": "plaidml.keras", "line_number": 11, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 45, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 76, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 78, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 80, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.utils.class_weight.compute_class_weight", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.utils.class_weight", "line_number": 93, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 105, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", 
"line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}]} +{"seq_id": "471840474", "text": "import torch\nimport torchvision \nimport torch.nn.functional as F\nimport torch_geometric.data as tgd\nimport matplotlib.pyplot as plt\nfrom torch_geometric.nn import GCNConv\nfrom torch.nn import Linear\nfrom apollo.utils.util import Util\nfrom sklearn.manifold import TSNE\nfrom IPython.display import Javascript # Restrict height of output cell.\nfrom sklearn.model_selection import ShuffleSplit\nfrom apollo.utils.GCN_Mult import GCN_Mult\n#display(Javascript('''google.colab.output.setIframeHeight(0, true, {maxHeight: 300})'''))\n\n\n#model = GCN_Mult(hidden_channels=16,num_feats=train_vec[gvec_ind].num_features).double()\n#optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n#criterion = torch.nn.CrossEntropyLoss()\n\nclass GCN(object):\n \n def __init__(self,train_vec,test_vec,hidden_channels,epochs,weight_decay,learing_rate):\n ''' Initialization function for named entity recognition parts\n\n :param path_to_data: Path to news content\n '''\n self.train_vec = train_vec\n self.test_vec = test_vec\n self.model = GCN_Mult(hidden_channels=hidden_channels,num_feats=train_vec.num_features).double()\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learing_rate, weight_decay=weight_decay)\n self.criterion = torch.nn.CrossEntropyLoss()\n pass\n \n def visualize(self,h, color):\n ''' Initialization function for named entity recognition parts\n\n :param path_to_data: Path to news content\n '''\n #z = TSNE(n_components=2).fit_transform(out.detach().cpu().numpy())\n z = []\n plt.figure(figsize=(10,10))\n plt.xticks([])\n plt.yticks([])\n plt.scatter(z[:, 0], z[:, 1], s=70, c=color, cmap=\"Set2\")\n plt.show()\n \n def train(self):\n ''' Initialization function for named entity recognition parts\n\n :param path_to_data: Path to news content\n '''\n self.model.train()\n self.optimizer.zero_grad() # Clear gradients.\n out = self.model(self.train_vec.x.double(), self.train_vec.edge_index) # Perform a single forward pass.\n loss = self.criterion(out, self.train_vec.y.long()) # Compute the loss solely based on the training nodes.\n loss.backward() # Derive gradients.\n self.optimizer.step() # Update parameters based on gradients.\n return loss\n\n def test(self):\n ''' Initialization function for named entity recognition parts\n\n :param path_to_data: Path to news content\n '''\n self.model.eval()\n out = self.model(self.test_vec.x.double(), self.test_vec.edge_index).double()\n pred = out.argmax(dim=1) # Use the class with highest probability.\n test_correct = pred == self.test_vec.y.double() # Check against ground-truth labels.\n test_acc = int(test_correct.sum()) / len(self.test_vec.y) # Derive ratio of correct predictions.\n return test_acc\n\n", "sub_path": "apollo/utils/GCN.py", "file_name": "GCN.py", "file_ext": "py", "file_size_in_byte": 2967, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "apollo.utils.GCN_Mult.GCN_Mult", 
"line_number": 29, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "468105789", "text": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport shutil\n\ndef load_config(filepath):\n '''Read JSON configuration'''\n experiment = os.path.basename(filepath).split('.')[0]\n with open(filepath, 'r') as config_file:\n config_json = json.load(config_file)\n return experiment, config_json\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config\", help=\"Path to JSON config file\", required=True)\n parser.add_argument(\"-o\", \"--output\", help=\"Path to output directory\",\n required=True)\n parser.add_argument(\"-r\", \"--repeat\", help=\"Number of times to repeat each topology\", default=1, type=int)\n settings = parser.parse_args()\n\n # Load experiment configuration\n experiment, config = load_config(settings.config)\n\n # Prepare output directory\n base_outdir = os.path.join(os.path.abspath(settings.output), experiment)\n os.makedirs(base_outdir, exist_ok=True)\n\n base_frr_cmd = [\"python3\", \"frr_config.py\", \"-a\" ]\n\n # Run experiment specified number of times\n for iteration in range(settings.repeat):\n print(\"\\n## RUNNING ITERATION %d...\" % (iteration+1))\n # Run experiment for each topology\n for topology in config[\"topologies\"]:\n print(\"\\n#### RUNNING TOPOLOGY %s...\" % topology)\n # Prepare topology output directory\n topo_outdir = os.path.join(base_outdir, \"%s_%02d\" % (topology, iteration + 1))\n os.makedirs(topo_outdir, exist_ok=True)\n\n # Run frr_config.py\n config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"topologies\", \"%s.json\" % topology) \n extra_frr_args = [\"-t\", topo_outdir, \"-c\", config_path, \"-p\", str(config[\"packets\"]), \"-f\", '\"%s\"' % config[\"filter\"]]\n if (\"delay\" in config):\n extra_frr_args.extend([\"-d\", config[\"delay\"]])\n frr_cmd = \" \".join(base_frr_cmd) + \" start \" + \" \".join(extra_frr_args)\n print(frr_cmd)\n os.system(frr_cmd)\n frr_cmd = \" \".join(base_frr_cmd) + \" stop \" + \" \".join(extra_frr_args)\n print(frr_cmd)\n os.system(frr_cmd)\n\n # Run ospf_dump.py\n pcap_path = os.path.join(topo_outdir, \"tcpdump.pcap\")\n outfile_path = os.path.join(topo_outdir, \"parsed_tcp.txt\")\n ospf_dump_args = [\"python3\", \"../pattern_recog/ospf_dump.py\", \"-p\", pcap_path, \"-o\", outfile_path, \"-s\"]\n ospf_dump_cmd = \" \".join(ospf_dump_args)\n print(ospf_dump_cmd)\n 
os.system(ospf_dump_cmd)\n\nif __name__ == \"__main__\":\n main()", "sub_path": "frrouting_automation/bulk.py", "file_name": "bulk.py", "file_ext": "py", "file_size_in_byte": 2613, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.basename", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 27, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 43, "usage_type": "call"}, {"api_name": "os.system", "line_number": 49, "usage_type": "call"}, {"api_name": "os.system", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "385874664", "text": "from matplotlib import pyplot as plt\nimport numpy as np\nfrom scipy.integrate import solve_ivp\nfrom numpy import cos, pi, sin, sqrt, exp, random\nimport matplotlib\nmatplotlib.rcParams.update({\n 'pgf.texsystem': 'pdflatex',\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n})\n\na=0\nb=4\ndt=0.2\nu0=[1,0]\nv0=[0,1]\nt=np.arange(a,b,dt)\n# tspan=[a,b]\n\ndef particular(t, y):\n u=y[0]\n z=y[1]\n return [z, (-z/(t+2))+(2*u/((t+2)**2))-(4/((t+2)**3))]\n\ndef homog(t, y):\n v=y[0]\n w=y[1]\n return [w, (-w/(t+2))+(2*v/((t+2)**2))]\n\nuSol=solve_ivp(particular, [t[0],t[-1]], u0, t_eval=t)\nvSol=solve_ivp(homog, [t[0],t[-1]], v0, t_eval=t)\nC=(1/3 - uSol.y[1][-1])/(vSol.y[1][-1])\nprint(C)\nySol=uSol.y[0]+C*vSol.y[0]\n\nplt.figure(1)\nplt.plot(uSol.t, uSol.y[0], label='u', c='r')\nplt.plot(vSol.t, vSol.y[0], label='v', c='b')\nplt.legend()\nplt.xlabel('t')\nplt.ylabel('y')\nplt.figure(2)\nplt.plot(uSol.t, ySol, label='y', c='orange')\nplt.legend()\nplt.xlabel('t')\nplt.ylabel('y')\nplt.show()\n", "sub_path": "3rd Year/3AN/Linear Shooting/LinearShooting1.py", "file_name": "LinearShooting1.py", "file_ext": "py", "file_size_in_byte": 1012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "matplotlib.rcParams.update", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 6, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.integrate.solve_ivp", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.integrate.solve_ivp", 
"line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "328265593", "text": "__author__ = 'cherry'\nimport re\nimport socket\nimport StringIO\nimport ClientForm\nimport requests\nimport requesocks\nfrom BeautifulSoup import BeautifulSoup, SoupStrainer\n# import hashlib\n\n\nclass Rqbrowser:\n \"\"\" Test reg forum\n \"\"\"\n\n def __init__(self):\n self.browser = requesocks.session()\n self.sock5 = False\n self.proxy = False\n self.link_host = ''\n self.link_origin = ''\n\n def set_proxies(self, sock5='', proxy=''):\n if sock5:\n self.sock5 = sock5\n elif proxy:\n self.proxy = proxy\n \n def open(self, url, data=None, files=None, header='', stream=False, timeout=5):\n if self.sock5 or self.proxy:\n def getaddrinfo(*args):\n return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]\n socket.getaddrinfo = getaddrinfo\n if self.sock5:\n self.browser.proxies = {'http': 'socks5://' + self.sock5, 'https': 'socks5://' + self.sock5}\n else:\n self.browser.proxies = {'http': self.proxy, 'https': self.proxy}\n if data:\n rx = self.browser.post(url, data=data, headers=header, timeout=timeout)\n rx.encoding = 'UTF-8'\n else:\n rx = self.browser.get(url, headers=header, timeout=timeout)\n rx.encoding = 'UTF-8'\n if files:\n rx = self.browser.post(url, files=files, timeout=timeout)\n rx.encoding = 'UTF-8'\n if stream:\n rx = self.browser.get(url, stream=True, timeout=timeout)\n rx.encoding = 'UTF-8'\n return rx\n\n def close(self):\n del self.browser\n\n def fix_html(self, html):\n return html.replace(\"'\", '\"')\n\n def select_form(self, rx):\n htmlx = rx.content\n linkget = str(rx.url)\n forms_filter = 
SoupStrainer('form')\n soup = BeautifulSoup(htmlx, parseOnlyThese=forms_filter)\n forms_post = ClientForm.ParseFile(StringIO.StringIO(soup.prettify()), linkget, backwards_compat=False)\n return forms_post\n\n def fake_socks(self, sock):\n pass\n\n\nclass Login():\n def __init__(self):\n self.session = requests.session()\n\n def search_login(self):\n pass\n", "sub_path": "Poster/browser.py", "file_name": "browser.py", "file_ext": "py", "file_size_in_byte": 2242, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requesocks.session", "line_number": 17, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 32, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 32, "usage_type": "attribute"}, {"api_name": "socket.getaddrinfo", "line_number": 33, "usage_type": "attribute"}, {"api_name": "BeautifulSoup.SoupStrainer", "line_number": 61, "usage_type": "call"}, {"api_name": "BeautifulSoup.BeautifulSoup", "line_number": 62, "usage_type": "call"}, {"api_name": "ClientForm.ParseFile", "line_number": 63, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 63, "usage_type": "call"}, {"api_name": "requests.session", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "110819845", "text": "\"\"\"Views for the events application.\"\"\"\n\nfrom django.views import generic\nfrom django.shortcuts import get_object_or_404\nfrom events.utils import retrieve_all_events\nfrom events.models import (\n Series,\n Event,\n Session,\n Location,\n ThirdPartyEvent,\n Resource,\n)\n\n\nclass IndexView(generic.ListView):\n \"\"\"View for the events application homepage.\"\"\"\n\n template_name = \"events/index.html\"\n context_object_name = \"events\"\n\n def get_queryset(self):\n \"\"\"Get queryset of all topics.\n\n Returns:\n Queryset of Topic objects ordered by name.\n \"\"\"\n return retrieve_all_events()\n\n\nclass SeriesList(generic.ListView):\n \"\"\"View for all series.\"\"\"\n\n model = Series\n ordering = \"name\"\n context_object_name = \"series_list\"\n template_name = \"events/series_list.html\"\n\n\nclass SeriesView(generic.DetailView):\n \"\"\"View for a event series.\"\"\"\n\n model = Series\n template_name = \"events/series.html\"\n slug_url_kwarg = \"series_slug\"\n context_object_name = \"series\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the session view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n context = super(SeriesView, self).get_context_data(**kwargs)\n context[\"events\"] = retrieve_all_events(series=self.object)\n return context\n\n\nclass EventView(generic.DetailView):\n \"\"\"View for a specific event.\"\"\"\n\n model = Event\n template_name = \"events/event.html\"\n context_object_name = \"event\"\n\n def get_object(self, **kwargs):\n \"\"\"Retrieve object for the event view.\n\n Returns:\n Event object, or raises 404 error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.filter(is_published=True).select_related(),\n slug=self.kwargs.get(\"event_slug\", None),\n )\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the session view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n context = super(EventView, self).get_context_data(**kwargs)\n context[\"sessions\"] = self.object.sessions.order_by(\n \"start_datetime\", \"end_datetime\").prefetch_related(\"locations\")\n context[\"sponsors\"] = self.object.sponsors.order_by(\"name\")\n return 
context\n\n\nclass SessionView(generic.DetailView):\n \"\"\"View for a specific session.\"\"\"\n\n model = Session\n template_name = \"events/session.html\"\n context_object_name = \"session\"\n\n def get_object(self, **kwargs):\n \"\"\"Retrieve object for the session view.\n\n Returns:\n Session object, or raises 404 error if not found.\n \"\"\"\n return get_object_or_404(\n self.model.objects.select_related(),\n event__slug=self.kwargs.get(\"event_slug\", None),\n slug=self.kwargs.get(\"session_slug\", None)\n )\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the session view.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n context = super(SessionView, self).get_context_data(**kwargs)\n context[\"event\"] = self.object.event\n context[\"locations\"] = self.object.locations.order_by(\"name\")\n context[\"resources\"] = self.object.resources.order_by(\"name\")\n return context\n\n\nclass LocationView(generic.DetailView):\n \"\"\"View for a specific location.\"\"\"\n\n model = Location\n template_name = \"events/location.html\"\n slug_url_kwarg = \"location_slug\"\n context_object_name = \"location\"\n\n\nclass ThirdPartyEventView(generic.DetailView):\n \"\"\"View for a specific third party event.\"\"\"\n\n model = ThirdPartyEvent\n template_name = \"events/third-party-event.html\"\n slug_url_kwarg = \"event_slug\"\n context_object_name = \"event\"\n\n\nclass ResourceList(generic.ListView):\n \"\"\"View for all resources.\"\"\"\n\n model = Resource\n ordering = \"name\"\n context_object_name = \"resources\"\n template_name = \"events/resources.html\"\n", "sub_path": "cs4teachers/events/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4036, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.views.generic.ListView", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 16, "usage_type": "name"}, {"api_name": "events.utils.retrieve_all_events", "line_number": 28, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 31, "usage_type": "name"}, {"api_name": "events.models.Series", "line_number": 34, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 40, "usage_type": "name"}, {"api_name": "events.models.Series", "line_number": 43, "usage_type": "name"}, {"api_name": "events.utils.retrieve_all_events", "line_number": 55, "usage_type": "call"}, {"api_name": "django.views.generic.DetailView", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 59, "usage_type": "name"}, {"api_name": "events.models.Event", "line_number": 62, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 72, "usage_type": "call"}, {"api_name": "django.views.generic.DetailView", "line_number": 90, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 90, "usage_type": "name"}, {"api_name": "events.models.Session", "line_number": 93, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 103, "usage_type": "call"}, {"api_name": "django.views.generic.DetailView", "line_number": 122, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 122, 
"usage_type": "name"}, {"api_name": "events.models.Location", "line_number": 125, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 131, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 131, "usage_type": "name"}, {"api_name": "events.models.ThirdPartyEvent", "line_number": 134, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 140, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 140, "usage_type": "name"}, {"api_name": "events.models.Resource", "line_number": 143, "usage_type": "name"}]} +{"seq_id": "600623419", "text": "from re import sub\nfrom pymystem3 import Mystem\nfrom tqdm import tqdm\nfrom joblib import Parallel\nfrom joblib import delayed\nfrom tqdm import tqdm\nimport warnings\n\nwarnings.simplefilter(\"ignore\")\n\n\nRUSSIAN_STOPWORDS = {\n word.replace(\"\\n\", \"\"): True for word in open(\"RusStopWords.txt\", \"r\")\n}\nBATCH_SIZE = 1000\n\n\nclass TextPreprocessing:\n def __init__(self, df):\n self.df = df.copy()\n self.string_preprocessing()\n self.text_lemmatizing()\n\n def string_preprocessing(self):\n m = Mystem()\n self.df.text = self.df.text.apply(\n lambda string: sub(r\"\\b\\d+\\b\", \"\", sub(r\"[^\\w\\s]\", \" \", str(string)))\n )\n self.df.text = self.df.text.apply(\n lambda string: \"\".join(word.lower() for word in string).split()\n )\n self.df.text = self.df.text.apply(\n lambda string: \" \".join(\n word for word in string if word not in RUSSIAN_STOPWORDS\n )\n )\n\n def lemmatize(self, text):\n m = Mystem()\n merged_text = \"|\".join(text)\n doc = []\n result = []\n for text in m.lemmatize(merged_text):\n if text != \"|\":\n doc.append(text)\n else:\n result.append(doc)\n doc = []\n return result\n\n def text_lemmatizing(self):\n self.df.text = self.df.text.apply(\n lambda texts: [\n texts[i : i + BATCH_SIZE] for i in range(0, len(texts), BATCH_SIZE)\n ]\n )\n self.df.text = self.df.text.apply(\n lambda text_batch: Parallel(n_jobs=-1)(\n delayed(self.lemmatize)(part_of_batch)\n for part_of_batch in tqdm(text_batch)\n )\n )\n", "sub_path": "Preprocessing/Preprocessing.py", "file_name": "Preprocessing.py", "file_ext": "py", "file_size_in_byte": 1739, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "warnings.simplefilter", "line_number": 9, "usage_type": "call"}, {"api_name": "pymystem3.Mystem", "line_number": 25, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 27, "usage_type": "call"}, {"api_name": "pymystem3.Mystem", "line_number": 39, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 58, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 59, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "456004489", "text": "import keras\nimport numpy as np\nfrom keras.datasets import imdb\nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\nimport matplotlib.pyplot as plt\n\n# 处理的数据集一共是50000条,分别有25000条评论用于训练与测试\n\n# 1. 导入数据,其中参数num_words表示保留了10000个高频词汇,低频的词汇就不保留了\n(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)\nprint(train_data[0])\nprint(train_labels[0])\n\n\n# 2. 
Encode the integer sequences as a binary matrix\ndef vectorize_sequences(sequences, dimension=10000):\n    # (create an all-zero matrix of shape (len(sequences), dimension))\n    results = np.zeros((len(sequences), dimension))\n    for i, sequence in enumerate(sequences):\n        results[i, sequence] = 1. # (set the specified indices of results[i] to 1)\n    return results\n\n\n# (vectorize the training data)\nx_train = vectorize_sequences(train_data)\n# (vectorize the test data)\nx_test = vectorize_sequences(test_data)\n\nprint(x_train[0])\n\n# Vectorize the labels as well\ny_train = np.asarray(train_labels.astype('float32'))\ny_test = np.asarray(test_labels.astype('float32'))\n\n# 3. Build the network\nmodel = models.Sequential()\nmodel.add(layers.Dense(16, activation='relu', input_shape=(10000,)))\nmodel.add(layers.Dense(16,activation='relu'))\nmodel.add(layers.Dense(1,activation='sigmoid'))\n\n\n# 4. Compile the model\nmodel.compile(\n    optimizer=optimizers.RMSprop(lr=0.001),\n    loss='binary_crossentropy',\n    metrics=['accuracy']\n)\n\n# 5. Hold out a validation set\nx_val = x_train[:10000]\npartial_x_train = x_train[10000:]\ny_val = y_train[:10000]\npartial_y_train = y_train[10000:]\n\n# 6. Train the model; fit() returns a History object\nhistory =model.fit(\n    partial_x_train,\n    partial_y_train,\n    batch_size=512,\n    epochs=20,\n    validation_data=(x_val,y_val)\n)\n\n\n# 7. Plot the loss and accuracy with matplotlib\nhistory_dict = history.history\nloss_value = history_dict['loss']\nval_loss_value = history_dict['val_loss']\n\nepochs = range(1,len(loss_value)+1)\nplt.plot(epochs,loss_value,'bo',label = 'train_loss')\nplt.plot(epochs,val_loss_value,'b',label = 'val_loss')\nplt.title('train and validation loss')\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.legend()\nplt.show()", "sub_path": "learn_keras/imdb.py", "file_name": "imdb.py", "file_ext": "py", "file_size_in_byte": 2233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "keras.datasets.imdb.load_data", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.datasets.imdb", "line_number": 12, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 38, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 39, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 40, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 41, "usage_type": "name"}, {"api_name": "keras.optimizers.RMSprop", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.ylabel", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "140010017", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 22 11:48:52 2023\n\n@author: ruth\n\"\"\"\n\n\nimport pandas as pd\nfrom pathlib import Path\nfrom collections import Counter\n\ndata_folder = Path('/data_2023/')\nraw_data_folder = Path('/raw_data_update/')\n\n\nreac_prop = pd.read_csv(raw_data_folder / 'reac_prop.tsv', skiprows=351, sep='\\t')\n\ncofactors = set(['WATER', 'MNXM13', 'MNXM735438', 'MNXM3', 'MNXM40333', 'MNXM64096', 'MNXM10'])\n\nmnxr = []\nsubs = []\nprods = []\nsubs_co = []\nprods_co = []\necs = []\n\n\nfor i, row in reac_prop.iterrows():\n if row['#ID'] == \"EMPTY\" or type(row.mnx_equation) != str: continue\n mnxr.append(row['#ID'])\n\n sub, prod = row.mnx_equation.split(' = ')\n sub = set([x.split('@')[0] for x in sub.split(' ') if 'MNXM' in x])\n prod = set([x.split('@')[0] for x in prod.split(' ') if 'MNXM' in x])\n \n sub_co = sub.intersection(cofactors)\n prod_co = prod.intersection(cofactors)\n \n sub = sub - cofactors\n prod = prod - cofactors\n \n subs.append('|'.join(sub))\n prods.append('|'.join(prod))\n \n if len(sub_co)>0:\n subs_co.append('|'.join(sub_co))\n else:\n subs_co.append('')\n if len(prod_co)>0:\n prods_co.append('|'.join(sub_co))\n else:\n prods_co.append('')\n \n ec= row.classifs\n if type(ec) == str:\n ec = ec.split(';')[0]\n else:\n ec = 'NOEC'\n \n ecs.append(ec)\n\ndf = pd.DataFrame({'ec':ecs, 'id': mnxr, 'dir':[0]*len(mnxr), 's': subs, 'p':prods, 'sc': subs_co, 'pc': prods_co})\ndf.to_csv(data_folder / 'rxn_consensus_20160612.txt', header = False, index=False, sep='\\t')\n\n\n\n\n\n\n", "sub_path": "data_update/make_consensus_dir_EMPTY.py", "file_name": "make_consensus_dir_EMPTY.py", "file_ext": "py", "file_size_in_byte": 1620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pathlib.Path", "line_number": 14, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "496980308", "text": "from django.http import HttpResponseRedirect\nfrom django.views.generic import ListView\nfrom django.views.generic.edit import CreateView, UpdateView\nfrom documents.models import Document, Add_Document_Form\nfrom classlists.models import Classes\nfrom homework.models import Homework\nfrom django.core.urlresolvers import reverse_lazy\n\nfrom datetime import date, timedelta\n\nclass DocumentListView(ListView):\n template_name=\"document_list.html\"\n \n def get_queryset(self):\n class_db=Classes.objects.get(classes=self.kwargs['class_url'])\n return Document.objects.select_related().filter(class_db__exact=class_db)\n \n def get_context_data(self, **kwargs):\n class_url=self.kwargs['class_url']\n context=super(DocumentListView, self).get_context_data(**kwargs)\n context['class_url']=class_url.lower()\n class_db=Classes.objects.get(classes=self.kwargs['class_url'])\n 
document_list=Document.objects.select_related().filter(class_db__exact=class_db)\n        subjects=document_list.values_list('subject', flat=True).distinct()\n        context['subject_list']=subjects\n        return context\n    \n\nclass DocumentCreateView(CreateView):\n\tmodel=Document\n\tform_class=Add_Document_Form\n\ttemplate_name=\"documents/document_form.html\"\n\t\n# \tdef get_initial(self, **kwargs):\n# \t    initial=super(DocumentCreateView, self).get_initial()\n# \t    class_url=self.kwargs['class_url']\n# \t    class_db=Classes.objects.filter(classes=self.kwargs['class_url'])\n# \t    initial['class_db']=class_db\n# \t    return initial\n\t\n\tdef get_context_data(self, **kwargs):\n\t    class_url=self.kwargs['class_url']\n\t    class_db=Classes.objects.get(classes=self.kwargs['class_url'])\n\t    context=super(DocumentCreateView, self).get_context_data(**kwargs)\n\t    context['form'].fields['homework'].queryset=Homework.objects.exclude(due_date__date__lt=(date.today())).filter(class_db=class_db)\n\t    context['class_url']=class_url.lower()\n\t    return context\n\t    \n\tdef form_valid(self, form):\n\t    class_db=Classes.objects.get(classes=self.kwargs['class_url'])\n\t    \n\t    new_document=form.save(commit=False)\n\t    if new_document.subject == None:\n\t        new_document.subject='Other'\n\t    new_document.filename=new_document.attached_file.name\n\t    new_document.save()\n\t    new_document.class_db.add(class_db)\n\t    return HttpResponseRedirect(reverse_lazy('document-list-view', args=(self.kwargs['class_url'],),))\n\nclass DocumentUpdateView(UpdateView):\n    model=Document\n    form_class=Add_Document_Form\n    template_name=\"documents/modify_document.html\"\n\n    def get_context_data(self, **kwargs):\n        class_url=self.kwargs['class_url']\n        context=super(DocumentUpdateView, self).get_context_data(**kwargs)\n        context['class_url']=class_url.lower()\n        return context\n    \n    def form_valid(self, form):\n        pk=self.kwargs['pk']\n        new_document=Document.objects.get(id=pk)\n        class_db=Classes.objects.get(classes=self.kwargs['class_url'])\n        if self.request.POST['mod/del']=='Delete':\n            new_document.delete()\n            return HttpResponseRedirect(reverse_lazy('document-list-view', args=(self.kwargs['class_url'],),))\n        else:\n            new_document=form.save(commit=False)\n            new_document.filename=(new_document.attached_file.name).replace('attachments/', '', 1)  # str.lstrip strips a character set, not a prefix\n            new_document.save()\n            new_document.class_db.add(class_db)\n            return HttpResponseRedirect(reverse_lazy('document-list-view', args=(self.kwargs['class_url'],),))", "sub_path": "documents/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3533, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.views.generic.ListView", "line_number": 11, "usage_type": "name"}, {"api_name": "classlists.models.Classes.objects.get", "line_number": 15, "usage_type": "call"}, {"api_name": "classlists.models.Classes.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "classlists.models.Classes", "line_number": 15, "usage_type": "name"}, {"api_name": "documents.models.Document.objects.select_related", "line_number": 16, "usage_type": "call"}, {"api_name": "documents.models.Document.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "documents.models.Document", "line_number": 16, "usage_type": "name"}, {"api_name": "classlists.models.Classes.objects.get", "line_number": 22, "usage_type": "call"}, {"api_name": "classlists.models.Classes.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "classlists.models.Classes", 
"line_number": 22, "usage_type": "name"}, {"api_name": "documents.models.Document.objects.select_related", "line_number": 23, "usage_type": "call"}, {"api_name": "documents.models.Document.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "documents.models.Document", "line_number": 23, "usage_type": "name"}, {"api_name": "django.views.generic.edit.CreateView", "line_number": 29, "usage_type": "name"}, {"api_name": "documents.models.Document", "line_number": 30, "usage_type": "name"}, {"api_name": "documents.models.Add_Document_Form", "line_number": 31, "usage_type": "name"}, {"api_name": "classlists.models.Classes.objects.get", "line_number": 43, "usage_type": "call"}, {"api_name": "classlists.models.Classes.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "classlists.models.Classes", "line_number": 43, "usage_type": "name"}, {"api_name": "homework.models.Homework.objects.exclude", "line_number": 45, "usage_type": "call"}, {"api_name": "homework.models.Homework.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "homework.models.Homework", "line_number": 45, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 45, "usage_type": "name"}, {"api_name": "classlists.models.Classes.objects.get", "line_number": 50, "usage_type": "call"}, {"api_name": "classlists.models.Classes.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "classlists.models.Classes", "line_number": 50, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 58, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 58, "usage_type": "call"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 60, "usage_type": "name"}, {"api_name": "documents.models.Document", "line_number": 61, "usage_type": "name"}, {"api_name": "documents.models.Add_Document_Form", "line_number": 62, "usage_type": "name"}, {"api_name": "documents.models.Document.objects.get", "line_number": 73, "usage_type": "call"}, {"api_name": "documents.models.Document.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "documents.models.Document", "line_number": 73, "usage_type": "name"}, {"api_name": "classlists.models.Classes.objects.get", "line_number": 74, "usage_type": "call"}, {"api_name": "classlists.models.Classes.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "classlists.models.Classes", "line_number": 74, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 77, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 77, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 83, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "499512776", "text": "\nimport numpy as np\nimport math\nfrom astropy.cosmology import WMAP9 as cosmo\n\n# ---- define a lot of parameterised SFHs.\n\n\n\ndef const(ages, p):\n\n SFH = np.ones(ages.shape)\n SFH[ages>= p['age']] = 0.0\n SFH[0] *= 2.\n \n return SFH \n \ndef exp(ages, p):\n\n SFH = np.zeros(ages.shape)\n SFH[ages out/', path_file[:-5] + '_fix.xlsx')\r\n\r\n\r\n__init__()\r\n", "sub_path": "setup_excel_bom_ver1.py", "file_name": "setup_excel_bom_ver1.py", "file_ext": "py", "file_size_in_byte": 2739, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.path.isdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.mkdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.path.isfile", "line_number": 31, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "143662340", "text": "from flask import Flask, render_template, request, jsonify\nimport sys\nimport os\nsys.path.append(os.getcwd())\nfrom elopy import *\n\napp = Flask(__name__)\nrating = Implementation()\n\n\ndef load_data_from_file(rating):\n token = None\n try:\n token_file = open('token.txt', 'r')\n token = token_file.read().strip()\n token_file.close()\n rating_file = open('ratings.txt', 'r')\n for player_data in rating_file.readlines():\n player_data_array = player_data.split('_')\n rating.addPlayer(player_data_array[0], float(player_data_array[1]), int(player_data_array[2]),\n int(player_data_array[3]), float(player_data_array[4]), int(player_data_array[5]))\n rating_file.close()\n match_file = open('matches.txt', 'r')\n for match_data in match_file.readlines():\n if '_' in match_data:\n match_data_array = match_data.split('_')\n rating.addMatchToList(match_data_array[0], match_data_array[1].rstrip())\n match_file.close()\n except FileNotFoundError:\n if token is None:\n raise Exception('Token cannot be empty!')\n\n return token\n\n\ntoken = load_data_from_file(rating)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/admin', methods=('GET', 'POST'))\ndef admin():\n if request.method == 'POST':\n if len(request.form) == 1:\n if request.form['playername'].split(' ', 1)[0] == token:\n rating.addPlayer(request.form['playername'].split(' ', 1)[1])\n else:\n if request.form['victorious'].split(' ', 1)[0] == token:\n victorious = request.form['victorious'].split(' ', 1)[1]\n record_match_and_update_files(rating, victorious, request.form['defeated'])\n\n return render_template('admin.html', rating_list=rating.getRatingList(), matches_list = rating.getMatchesList())\n\n\n@app.route('/ping-pong')\ndef ping_pong():\n return render_template('ping-pong.html', rating_list=rating.getRatingList(), matches_list = rating.getMatchesList())\n\n\n@app.route('/get_ratings')\ndef get_ratings():\n array = []\n for player_rating in rating.getRatingList():\n dict = {}\n dict['name'] = player_rating[0]\n dict['rating'] = player_rating[1]\n dict['matches'] = player_rating[2]\n dict['win_streak'] = player_rating[3]\n dict['highest_rating'] = player_rating[4]\n dict['rank_image'] = player_rating[5]\n dict['victories'] = player_rating[6]\n array.append(dict)\n\n return jsonify(array)\n\n\n@app.route('/fifa')\ndef fifa():\n return render_template('fifa.html')\n\n\ndef 
record_match_and_update_files(rating, victorious, defeated, rating_file='ratings.txt', match_file='matches.txt'):\n    rating.recordMatch(victorious, defeated, winner=victorious)\n    save_ratings_to_file(rating, rating_file)\n    save_matches_to_file(rating, match_file)\n\n\ndef save_ratings_to_file(rating, file_name=\"ratings.txt\"):\n    rating_file = open(file_name, \"w+\")\n    for (player, ranking, matches, win_streak, highest_rating, rank_image, victories) in rating.getRatingList():\n        rating_file.write(\"{}_{}_{}_{}_{}_{}\\n\".format(player, ranking, matches, win_streak, highest_rating, victories))\n    rating_file.close()\n\n\ndef save_matches_to_file(rating, file_name=\"matches.txt\"):\n    match_file = open(file_name, \"w+\")\n    for (winner, defeated) in rating.getMatchesList():\n        match_file.write(\"{}_{}\\n\".format(winner, defeated))\n    match_file.close()", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3536, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "360409771", "text": "from django.core.files.storage import Storage\nfrom django.conf import settings\nfrom fdfs_client.client import Fdfs_client\n\n\nclass FDFSStorage(Storage):\n    def __init__(self,client_conf=None,base_url=None):\n        if client_conf is None:\n            client_conf = settings.FDFS_CLIENT_CONF\n        self.client_conf = client_conf\n\n        if base_url is None:\n            base_url = settings.FDFS_URL\n        self.base_url = base_url\n\n    def _open(self,name,mode='rb'):\n        pass\n\n    def _save(self, name, content, max_length=None):\n        print(\"ok\")\n        #create an fdfs client object\n        client = Fdfs_client(self.client_conf)\n        #upload the file to the fastdfs system\n        res = client.upload_by_buffer(content.read())\n        print(res)\n        #check the result returned in res\n        if res.get('Status') != \"Upload successed.\":\n            raise 
Exception(\"上传文件到fastdfs失败\")\n\n filename = res.get(\"Remote file_id\")\n\n return filename\n\n def exists(self, name):\n return False\n\n def url(self, name):\n return self.base_url + name", "sub_path": "diaryfresh/utils/fdfs/storage.py", "file_name": "storage.py", "file_ext": "py", "file_size_in_byte": 1076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.core.files.storage.Storage", "line_number": 6, "usage_type": "name"}, {"api_name": "django.conf.settings.FDFS_CLIENT_CONF", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.settings.FDFS_URL", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "fdfs_client.client.Fdfs_client", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "295213735", "text": "import requests\nimport datetime\nfrom collections import namedtuple\n\nrepository = namedtuple('Repository', 'url name owner issues')\n\n\ndef get_time():\n n_days_ago = datetime.datetime.today() - datetime.timedelta(days=7)\n return n_days_ago.strftime('%Y-%m-%d')\n\n\ndef get_trending_repositories(top_size):\n url = 'https://api.github.com/search/repositories'\n params = {\n 'q': 'created:>{}'.format(get_time()),\n 'sort': 'stars',\n 'order': 'desc',\n 'per_page': top_size\n }\n responce = requests.get(url=url, params=params)\n return [repository(url=x['html_url'],\n name=x['name'],\n owner=x['owner']['login'],\n issues=get_open_issues_amount(x['owner']['login'], x['name']))\n for x in responce.json()['items']]\n\n\ndef print_repos(repositories):\n for repo in sorted(repositories, key=lambda x: len(x.issues)):\n print('*' * 20)\n print('Repository \"{}\"'.format(repo.name))\n print('url: {}'.format(repo.url))\n print('issues: {}'.format(len(repo.issues)))\n print('\\n'.join(['{} {}'.format(x['title'], x['url']) for x in repo.issues]))\n\n\ndef get_open_issues_amount(repo_owner, repo_name):\n url = 'https://api.github.com/repos/{}/{}/issues'\n responce = requests.get(url=url.format(repo_owner, repo_name))\n return responce.json()\n\nif __name__ == '__main__':\n repo_quantity = 20\n repositories = get_trending_repositories(repo_quantity)\n print_repos(repositories)", "sub_path": "github_trending.py", "file_name": "github_trending.py", "file_ext": "py", "file_size_in_byte": 1522, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "collections.namedtuple", "line_number": 5, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "593818635", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport time\nimport json\n\nfrom serial import Serial\nfrom GChartWrapper import Sparkline\nfrom argparse import ArgumentParser\n\n\ndef data_limits(data):\n pmax, pmin = max(data), min(data)\n\n size = (pmax - pmin)\n size = size if size >= 10 else 10\n\n return pmax + (size * 0.25), pmin - (size * 0.25), (size * 0.5)\n\n\ndef data_percent(data, minimum, maximum):\n maximum = int(maximum)\n 
minimum = int(minimum)\n\n size = maximum - minimum\n\n return [((float(value - minimum)) / float(size)) * 100.0 for value in data]\n\nif __name__ == '__main__':\n parse = ArgumentParser()\n\n parse.add_argument('-o', '--output', dest='outfile', required=True)\n parse.add_argument('--output-data', dest='outdata', default=None)\n parse.add_argument('-p', '--port', dest='port', required=True)\n parse.add_argument('-i', '--interval', dest='interval', default=5, type=int)\n parse.add_argument('--data-size', dest='data_size', default=30, type=int)\n parse.add_argument('--width', dest='width', default=600, type=int)\n parse.add_argument('--height', dest='height', default=240, type=int)\n parse.add_argument('--color', dest='color', default='ff0000')\n\n opts = parse.parse_args()\n\n channel = Serial(port=opts.port)\n started = False\n try:\n data = []\n while True:\n signal = '.'\n channel.write('t')\n value = float(channel.read(channel.inWaiting()) or 0)\n if value == 0.0 and not started:\n pass\n else:\n started = True\n data.insert(0, value / 10.0)\n data = data if len(data) < opts.data_size else data[:opts.data_size]\n\n lmax, lmin, lsize = data_limits(data)\n\n g = Sparkline(data_percent(data, lmin, lmax), encoding='text')\n g.size(opts.width, opts.height)\n g.color(opts.color)\n g.line(2, 0, 0)\n g.marker('o', opts.color, 0, -1, 5)\n g.axes('y')\n g.axes.range(0, int(lmin), int(lmax), 1)\n g.save(opts.outfile)\n signal = '+'\n\n with open(opts.outdata, 'w') as fd:\n obj = {\n 'lastTemp': value / 10.0,\n 'interval': opts.interval,\n 'data': data\n }\n json.dump(obj, fd, indent=4)\n\n sys.stdout.write(signal)\n sys.stdout.flush()\n\n if started:\n time.sleep(opts.interval)\n else:\n time.sleep(1)\n except KeyboardInterrupt:\n print('exit')\n", "sub_path": "support/monitor.py", "file_name": "monitor.py", "file_ext": "py", "file_size_in_byte": 2704, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 30, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 43, "usage_type": "call"}, {"api_name": "GChartWrapper.Sparkline", "line_number": 60, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 78, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 79, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 79, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "277425782", "text": "from django import template\nfrom app.models import Order\n\nregister = template.Library()\n\n@register.filter\ndef itemCount(user):\n if user.is_authenticated:\n order = Order.objects.filter(user=user, ordered=False)\n if order.exists():\n return order[0].items.count()\n return 0\n", "sub_path": "app/templatetags/cart_tags.py", "file_name": "cart_tags.py", "file_ext": "py", "file_size_in_byte": 302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.template.Library", "line_number": 4, "usage_type": "call"}, {"api_name": "django.template", "line_number": 4, "usage_type": "name"}, {"api_name": "app.models.Order.objects.filter", "line_number": 9, "usage_type": "call"}, {"api_name": 
"app.models.Order.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "app.models.Order", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "527886768", "text": "#!/usr/bin/env python3\n#somethingiswrong.py\n\nimport os, sys, pickle, psutil, shutil\nsys.path.insert(0, r'/opt/ish')\nimport dialog\nfrom gi.repository import Gtk\nfrom time import localtime, strftime\nfrom os.path import expanduser\n\nhome_dir = expanduser(\"~\")\n\nclass Handler:\n # Exit app if window is closed. Traps the signal from GUI.\n def on_window4_destroy(self, *args):\n Gtk.main_quit(*args)\n\n # Exit app if abort\n def on_w4_btn_esc_pressed(self, abort):\n Gtk.main_quit(abort)\n\n def on_chooseapp_changed(self, combo):\n ''' Choose app to work with. '''\n app = combo.get_active_text()\n if app != None:\n global app_buffer\n app_buffer = (\"%s\" %app)\n# print (app_buffer) # Debug entry\n\n def get_app(self):\n ''' Get app to work with. '''\n if 'app_buffer' in globals():\n if app_buffer != '':\n return app_buffer\n else:\n dialog.error_message(\"Du måste välja vilket program du har problem med.\")\n else:\n dialog.error_message(\"Du måste välja vilket program du har problem med.\")\n\n def make_file(self, file_name, date):\n ''' Create a file with todays date.\n\n This so we don't force users to reset more then once per day.'''\n temp_path = home_dir + \"/.support/\" + file_name\n file_object = open(temp_path, 'wb')\n pickle.dump(date, file_object)\n file_object.close()\n\n def get_file(self, file_name):\n ''' Get a file that shows when they reset there app last time. '''\n temp_path = home_dir + \"/.support/\" + file_name\n if os.path.exists(temp_path):\n file_object = open(temp_path,'rb')\n date_restored = pickle.load(file_object)\n return date_restored\n else:\n return None\n \n def set_res_app(self, appname):\n ''' Set path to config files and systems prog name\n \n We want to kill the apps via name and remove there config-files.\n Maybe put this in another file, so it's easier to edit.\n '''\n ##############################################################\n # Dont forget to add appname in support.py around line 130(+)#\n # Failure to do so gives input error message after reset. 
#\n ##############################################################\n if \"LibreOffice\" in appname:\n xpath = home_dir + \"/.config/libreoffice\"\n pname = \"soffice.bin\" # not tested\n return {'xpath':xpath, 'pname':pname}\n elif \"Iceweasel\" in appname:\n xpath = home_dir + \"/.mozilla\"\n pname = \"iceweasel\" \n# pname = r\"/usr/lib/firefox/firefox\"\n return {'xpath':xpath, 'pname':pname}\n elif \"Chromium\" in appname:\n xpath = home_dir + \"/.config/chromium\"\n pname = \"chromium\"\n return {'xpath':xpath, 'pname':pname} \n elif \"Gimp\" in appname:\n xpath = home_dir + \"/.gimp-2.8\"\n pname = \"gimp-2.8\"\n return {'xpath':xpath, 'pname':pname}\n elif \"InDesign\" in appname:\n xpath = home_dir + \"/.freerdp\"\n pname = \"xfreerdp\"\n return {'xpath':xpath, 'pname':pname}\n elif \"Audition\" in appname:\n xpath = home_dir + \"/.ntprofile.V2\"\n pname = None # Cant kill Audition since its not running via this system\n return {'xpath':xpath, 'pname':pname}\n elif \"Firstclass klienten\" in appname:\n xpath = home_dir + \"/firstclass\"\n pname = \"fcc\"\n return {'xpath':xpath, 'pname':pname}\n else: return None\n \n def kill_app(self, pname):\n print (\"Trying to kill \" + pname)\n for proc in psutil.process_iter():\n if proc.name == pname:\n print (\"Process found\")\n proc.kill()\n else: print (\"Process not found\")\n \n def rm_settings(self, xpath):\n if os.path.isdir(xpath):\n shutil.rmtree(xpath, ignore_errors=False, onerror=None)\n elif os.path.isfile(xpath):\n os.remove(xpath)\n\n def dothefix(self, appname):\n '''Try to close app and remove settings.\n '''\n res_app = self.set_res_app(appname)\n if res_app:\n# print (\"Got process name: \" + (res_app['pname'])) # Debug entry, gives error if (res_app['pname'])) is None\n if res_app['pname']: # To make sure pname is not empty (Audition).\n print (\"Process name \" + res_app['pname'] + \" seems ok\")\n self.kill_app(res_app['pname'])\n self.rm_settings(res_app['xpath'])\n\n else:\n dialog.error_message(\"Kunde inte återställa \" + appname + \".\\nNågot är fel. 
Kontakta IT-ansvarig.\")\n sys.exit(1)\n\n appfile = \"/.restored_\" + appname\n self.make_file(appfile, (dialog.get_date())) \n dialog.info_message(appname + \" har återställts.\\nGå tillbaka till \" + appname + \" och se om det löste problemet.\\n\\\n\\nOm problemet upprepas gör en ny felanmälan.\")\n sys.exit(0) \n\n def try2fix(self, appname):\n '''Checks if app has been restored and give options.\n '''\n# print (appname) # Debug entry \n check_restored = \"/.restored_\" + appname\n# print (check_restored) # Debug entry\n date_restored = self.get_file(check_restored)\n\n if not (date_restored) == (dialog.get_date()):\n answer = self.cancel_message(\"Du har problem med \" + appname + \".\",\n \"Innan du gör en felanmälan ska du alltid prova att återställa programmet.\\n\"\\\n+ appname + \" kommer att stängas och återställas.\\n\\nVill du återställa \" + appname + \" nu?\")\n\n if \"ok\" in [(answer)]:\n print (appname) # Sends appname back to support.py so we know the user choosed to reset (fix) the app\n self.dothefix(appname)\n else:\n dialog.error_message(\"Genom att försöka återställa \" + appname + \" kan du själv lösa många problem.\\n\\nDu kan inte göra en felanmälan innan du försökt återställa minst en gång.\")\n\n else:\n # Ask if user want to reset the app again.\n again = self.cancel_message(\"Du har redan återställt \" + appname + \" idag.\",\\\n\"Du har redan återställt \" + appname + \" idag.\\n\"\\\n\"Vill du prova att återställa igen (JA)?\\n\\nOm du väljer NEJ kommer du till ett formulär\\ndär du kan göra en felanmälan.\")\n if \"ok\" in [(again)]:\n print (appname) # Sends appname back to support.py so we know the user choosed to reset (fix) the app\n self.dothefix(appname)\n elif \"cancel\" in [(again)]:\n # Sends \"mkcall\" back to support.py.. Opens error_report.py\n print (\"mkcall\")\n Gtk.main_quit(\"mkcall\") \n else:\n sys.exit(0)\n \n def on_w4_btn_ok_pressed(self, ok):\n '''Call try2fix-function with app name.\n \n If Annat or Skrivare, give a simpler form. -> error.py\n '''\n if self.get_app:\n app2fix = self.get_app()\n# print (app2fix) # Debug entry\n if app2fix:\n #if \"Annat\" or \"Skrivare\" in [(app2fix)]: # or doesnt work. Everything passes. How to test two strings in one statement?\n if \"Annat\" in [(app2fix)]: \n print (\"otherapp\") # Sends \"otherapp\" to support.py\n Gtk.main_quit(\"otherapp\") # Was otherapp\n elif \"Skrivare\" in [(app2fix)]:\n print (\"otherapp\")\n Gtk.main_quit(\"otherapp\")\n else: \n self.try2fix(app2fix) \n else:\n dialog.error_message(\"Inget program valdes.. Något är fel. Avslutar.\")\n sys.exit(1)\n\n def cancel_message(self, title, message):\n '''Create an info message dialog and display it modally to the user.\n \n We need to handle this message a bit different then the info message in dialog.py\n '''\n# print (message) # Debug entry \n dialog = Gtk.MessageDialog(None,\n Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,\n Gtk.MessageType.QUESTION, Gtk.ButtonsType.YES_NO, message)\n dialog.set_title(title)\n response = dialog.run()\n# print (response) # Debug entry\n dialog.destroy()\n if (response == Gtk.ResponseType.YES): \n return (\"ok\")\n elif (response == Gtk.ResponseType.NO):\n return (\"cancel\")\n else: # Probably -4 DELETE_EVENT\n sys.exit(0)\n \n def __init__(self):\n '''Use GtkBuilder to build our interface from the XML file.\n \n We use the initialization of the Handler class to establish\n references to the widgets we'll need to work with in the callbacks for\n various signals. 
This is done using the XML file we created with Glade\n '''\n try:\n builder = Gtk.Builder()\n builder.add_from_file(\"somethingiswrong.glade\") \n except:\n dialog.error_message(\"Failed to load UI XML file: somethingiswrong.glade\")\n sys.exit(1) \n \n # get the widgets which will be referenced in callbacks\n self.window4 = builder.get_object(\"window4\") \n\n builder.connect_signals(self)\n\n def main(self):\n '''Run main application window\n \n Exit app if main win is closed (x:ed). Even if signal from GUI is missing.\n '''\n self.window4.connect(\"delete-event\", Gtk.main_quit)\n self.window4.show()\n Gtk.main()\n \nif __name__ == \"__main__\":\n smtwrong = Handler()\n smtwrong.main()\n", "sub_path": "somethingiswrong.py", "file_name": "somethingiswrong.py", "file_ext": "py", "file_size_in_byte": 10008, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.path.insert", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 11, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 16, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 16, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 20, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 20, "usage_type": "name"}, {"api_name": "dialog.error_message", "line_number": 36, "usage_type": "call"}, {"api_name": "dialog.error_message", "line_number": 38, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 54, "usage_type": "call"}, {"api_name": "psutil.process_iter", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 112, "usage_type": "call"}, {"api_name": "dialog.error_message", "line_number": 126, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 127, "usage_type": "call"}, {"api_name": "dialog.get_date", "line_number": 130, "usage_type": "call"}, {"api_name": "dialog.info_message", "line_number": 131, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 133, "usage_type": "call"}, {"api_name": "dialog.get_date", "line_number": 143, "usage_type": "call"}, {"api_name": "dialog.error_message", "line_number": 152, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 165, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 165, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 167, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 181, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 181, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 184, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 184, "usage_type": "name"}, {"api_name": "dialog.error_message", 
"line_number": 188, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 189, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.MessageDialog", "line_number": 197, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 197, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.DialogFlags", "line_number": 198, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 198, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.MessageType", "line_number": 199, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 199, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ButtonsType", "line_number": 199, "usage_type": "attribute"}, {"api_name": "dialog.set_title", "line_number": 200, "usage_type": "call"}, {"api_name": "dialog.run", "line_number": 201, "usage_type": "call"}, {"api_name": "dialog.destroy", "line_number": 203, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.ResponseType", "line_number": 204, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 204, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ResponseType", "line_number": 206, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 206, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 209, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Builder", "line_number": 219, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 219, "usage_type": "name"}, {"api_name": "dialog.error_message", "line_number": 222, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 223, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 235, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 235, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main", "line_number": 237, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 237, "usage_type": "name"}]} +{"seq_id": "185549700", "text": "#!/usr/bin/python3\n\nimport subprocess\nimport sys\nimport os\nimport re\nimport argparse\nimport itertools\nimport socket\nimport json\nfrom urllib.request import urlopen, Request\n\nl3cache_coreid_d = {\"Standard_HB120rs_v3\": [\"0-7\",\"8-15\",\"16-23\",\"24-29\",\"30-37\",\"38-45\",\"46-53\",\"54-59\",\"60-67\",\"68-75\",\"76-83\",\"84-89\",\"90-97\",\"98-105\",\"106-113\",\"114-119\"]}\nl3cache_coreid_d = {\"Standard_HB120rs_v3\": {\"l3cache_ids\": {0: [0,1,2,3,4,5,6,7],\\\n 1: [8,9,10,11,12,13,14,15],\\\n 2: [16,17,18,19,20,21,22,23],\\\n 3: [24,25,26,27,28,29],\\\n 4: [30,31,32,33,34,35,36,37],\\\n 5: [38,39,40,41,42,43,44,45],\\\n 6: [46,47,48,49,50,51,52,53],\\\n 7: [54,55,56,57,58,59],\\\n 8: [60,61,62,63,64,65,66,67],\\\n 9: [68,69,70,71,72,73,74,75],\\\n 10: [76,77,78,79,80,81,82,83],\\\n 11: [84,85,86,87,88,89],\\\n 12: [90,91,92,93,94,95,96,97],\\\n 13: [98,99,100,101,102,103,104,105],\\\n 14: [106,107,108,109,110,111,112,113],\\\n 15: [114,115,116,117,118,119]\\\n }},\n \"Standard_HB120-96rs_v3\": {\"l3cache_ids\": {0: [0,1,2,3,4,5],\\\n 1: [6,7,8,9,10,11],\\\n 2: [12,13,14,15,16,17],\\\n 3: [18,19,20,21,22,23],\\\n 4: [24,25,26,27,28,29],\\\n 5: [30,31,32,33,34,35],\\\n 6: [36,37,38,39,40,41],\\\n 7: [42,43,44,45,46,47],\\\n 8: [48,48,50,51,52,53],\\\n 9: [54,55,56,57,58,59],\\\n 10: [60,61,62,63,64,65],\\\n 11: [66,67,68,69,70,71],\\\n 12: [72,73,74,75,76,77],\\\n 13: [78,79,80,81,82,83],\\\n 14: [84,85,86,87,88,89],\\\n 15: [90,91,92,93,94,95]\\\n }},\n \"Standard_HB120-64rs_v3\": 
{\"l3cache_ids\": {0: [0,1,2,3],\\\n 1: [4,5,6,7],\\\n 2: [8,9,10,11],\\\n 3: [12,13,14,15],\\\n 4: [16,17,18,19],\\\n 5: [20,21,22,23],\\\n 6: [24,25,26,27],\\\n 7: [28,29,30,31],\\\n 8: [32,33,34,35],\\\n 9: [36,37,38,39],\\\n 10: [40,41,42,43],\\\n 11: [44,45,46,47],\\\n 12: [48,49,50,51],\\\n 13: [52,53,54,55],\\\n 14: [56,57,58,59],\\\n 15: [60,61,62,63]\\\n }},\n \"Standard_HB120-32rs_v3\": {\"l3cache_ids\": {0: [0,1],\\\n 1: [2,3],\\\n 2: [4,5],\\\n 3: [6,7],\\\n 4: [8,9],\\\n 5: [10,11],\\\n 6: [12,13],\\\n 7: [14,15],\\\n 8: [16,17],\\\n 9: [18,19],\\\n 10: [20,21],\\\n 11: [22,23],\\\n 12: [24,25],\\\n 13: [26,27],\\\n 14: [28,29],\\\n 15: [30,31]\\\n }},\n \"Standard_HB120-16rs_v3\": {\"l3cache_ids\": {0: [0],\\\n 1: [1],\\\n 2: [2],\\\n 3: [3],\\\n 4: [4],\\\n 5: [5],\\\n 6: [6],\\\n 7: [7],\\\n 8: [8],\\\n 9: [9],\\\n 10: [10],\\\n 11: [11],\\\n 12: [12],\\\n 13: [13],\\\n 14: [14],\\\n 15: [15]\\\n }},\n \"Standard_ND96asr_v4\": {\"l3cache_ids\": {0: [0,1,2,3],\\\n 1: [4,5,6,7],\\\n 2: [8,9,10,11],\\\n 3: [12,13,14,15],\\\n 4: [16,17,18,19],\\\n 5: [20,21,22,23],\\\n 6: [24,25,26,27],\\\n 7: [28,29,30,31],\\\n 8: [32,33,34,35],\\\n 9: [36,37,38,39],\\\n 10: [40,41,42,43],\\\n 11: [44,45,46,47],\\\n 12: [48,49,50,51],\\\n 13: [52,53,54,55],\\\n 14: [56,57,58,59],\\\n 15: [60,61,62,63],\\\n 16: [64,65,66,67],\\\n 17: [68,69,70,71],\\\n 18: [72,73,74,75],\\\n 19: [76,77,78,79],\\\n 20: [80,81,82,83],\\\n 21: [84,85,86,87],\\\n 22: [88,89,90,91],\\\n 23: [92,93,94,95]\\\n }}}\n\n\ndef get_vm_metadata():\n metadata_url = \"http://169.254.169.254/metadata/instance?api-version=2017-08-01\"\n metadata_req = Request(metadata_url, headers={\"Metadata\": True})\n\n for _ in range(30):\n# print(\"Fetching metadata\")\n metadata_response = urlopen(metadata_req, timeout=2)\n\n try:\n return json.load(metadata_response)\n except ValueError as e:\n print(\"Failed to get metadata %s\" % e)\n print(\" Retrying\")\n sleep(2)\n continue\n except:\n print(\"Unable to obtain metadata after 30 tries\")\n raise\n\n\ndef one_numa(row_l):\n oneNuma = True\n for row in row_l:\n if \"NUMANode\" in str(row):\n oneNuma = False\n break\n return oneNuma\n\n\ndef parse_lstopo():\n cmd = [\"lstopo-no-graphics\", \"--no-caches\"]\n try:\n cmdpipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n except FileNotFoundError:\n print(\"Error: Could not find the executable (lstopo-on-graphics), make sure you have installed the hwloc package.\")\n sys.exit(1)\n#print(cmdpipe.stderr.readline())\n topo_d = {}\n topo_d[\"numanode_ids\"] = {}\n\n row_l = cmdpipe.stdout.readlines()\n if one_numa(row_l):\n numanode = 0\n topo_d[\"numanode_ids\"][numanode] = {}\n topo_d[\"numanode_ids\"][numanode][\"core_ids\"] = []\n topo_d[\"numanode_ids\"][numanode][\"gpu_ids\"] = []\n\n for row in row_l:\n row_s = str(row)\n if \"NUMANode\" in row_s:\n row_l = row_s.split()\n numanode = int(row_l[2][2:])\n# print(numanode)\n topo_d[\"numanode_ids\"][numanode] = {}\n topo_d[\"numanode_ids\"][numanode][\"core_ids\"] = []\n topo_d[\"numanode_ids\"][numanode][\"gpu_ids\"] = []\n if \"Core\" in row_s:\n row_l = row_s.split()\n core_id = re.findall(r'\\d+',row_l[-1])[0]\n topo_d[\"numanode_ids\"][numanode][\"core_ids\"].append(int(core_id))\n if re.search(r'GPU.*card', row_s):\n row_l = row_s.split()\n gpu_id = re.findall(r'\\d+',row_l[-1])[0]\n topo_d[\"numanode_ids\"][numanode][\"gpu_ids\"].append(int(gpu_id))\n cmdpipe.stdout.close()\n cmdpipe.stderr.close()\n# print(topo_d)\n return topo_d\n\n\ndef 
create_l3cache_topo(actual_sku_name):\n l3cache_topo_d = {}\n for sku_name in l3cache_coreid_d:\n if sku_name == actual_sku_name:\n l3cache_topo_d = l3cache_coreid_d[sku_name]\n break\n\n return l3cache_topo_d\n\n\ndef find_pids(pattern):\n cmd = [\"pgrep\",pattern]\n pids_l = []\n cmdpipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n bpids_l = cmdpipe.stdout.readlines()\n if not bpids_l:\n print(\"Error: Cannot find application ({}), check that it is running.\".format(pattern))\n sys.exit(1)\n for bpid in bpids_l:\n pids_l.append(int(bpid))\n return pids_l\n\n\ndef find_threads(pids_l):\n process_d = {}\n process_d[\"pids\"] = {}\n for pid in pids_l:\n process_d[\"pids\"][pid] = {}\n threads_l = os.listdir(os.path.join(\"/proc\",str(pid),\"task\"))\n nrthreads = find_running_threads(pid, threads_l)\n# print(\"Num running threads = \",nrthreads)\n filepath = os.path.join(\"/proc\",str(pid),\"status\")\n f = open(filepath)\n for line in f:\n if \"Threads\" in line:\n num_threads = line.split(\":\")[1].strip()\n process_d[\"pids\"][pid][\"num_threads\"] = num_threads\n process_d[\"pids\"][pid][\"running_threads\"] = nrthreads\n# print(num_threads)\n if \"Cpus_allowed_list\" in line:\n cpus_allowed = line.split(\":\")[1].strip()\n process_d[\"pids\"][pid][\"cpus_allowed\"] = cpus_allowed\n# print(cpus_allowed)\n return process_d\n\n\ndef find_gpus_in_numa(numa_id, topo_d):\n for numanode_id in topo_d[\"numanode_ids\"]:\n if numa_id == numanode_id:\n return topo_d[\"numanode_ids\"][numanode_id][\"gpu_ids\"]\n \n\ndef find_process_gpus(topo_d, process_d):\n for pid in process_d[\"pids\"]:\n all_gpus_l = []\n for numa_id in process_d[\"pids\"][pid][\"numas\"]:\n gpus_l = find_gpus_in_numa(numa_id, topo_d)\n all_gpus_l.extend(gpus_l)\n process_d[\"pids\"][pid][\"gpus\"] = all_gpus_l\n\n\ndef find_last_core_id(process_d): \n for pid in process_d[\"pids\"]:\n filepath = os.path.join(\"/proc\",str(pid),\"stat\")\n f = open(filepath)\n for line in f:\n last_core_id = line.split()[38]\n process_d[\"pids\"][pid][\"last_core_id\"] = last_core_id\n \n\ndef conv_indx_str_to_list(indx_str):\n indx_l = []\n if \"-\" in indx_str:\n indx_str_l = indx_str.split(\"-\")\n return list(range(int(indx_str_l[0]),int(indx_str_l[1])+1))\n elif \",\" in indx_str:\n return list(map(int, indx_str.split(\",\")))\n else:\n indx_l.append(int(indx_str))\n return indx_l\n\n\ndef find_numas(cpus_allowed, topo_d):\n numa_l = []\n cpus_l = conv_indx_str_to_list(cpus_allowed)\n for cpu in cpus_l:\n for numa_id in topo_d[\"numanode_ids\"]:\n core_id_l = topo_d[\"numanode_ids\"][numa_id][\"core_ids\"]\n if cpu in core_id_l:\n numa_l.append(int(numa_id))\n return (list(set(numa_l)),len(cpus_l))\n\n\ndef find_process_numas(topo_d, process_d):\n for pid in process_d[\"pids\"]:\n cpus_allowed = process_d[\"pids\"][pid][\"cpus_allowed\"]\n numa_l,lenc = find_numas(cpus_allowed, topo_d)\n process_d[\"pids\"][pid][\"numas\"] = numa_l\n process_d[\"pids\"][pid][\"num_core_ids\"] = lenc\n\n\ndef find_running_threads(pid, threads_l):\n nrthreads = 0\n for thread in threads_l:\n filepath = os.path.join(\"/proc\",str(pid),\"task\",thread,\"status\")\n f = open(filepath)\n for line in f:\n if \"running\" in line:\n nrthreads += 1\n break\n return nrthreads\n\n\ndef calc_total_num_processes(process_d):\n return len(process_d[\"pids\"])\n\n\ndef calc_total_num_gpus(topo_d):\n num_gpus = 0\n for numanode in topo_d[\"numanode_ids\"]:\n num_gpus += len(topo_d[\"numanode_ids\"][numanode][\"gpu_ids\"])\n\n return 
num_gpus\n\n\ndef calc_total_num_numas(topo_d):\n return len(topo_d[\"numanode_ids\"])\n\n\ndef calc_total_num_l3caches(l3cache_topo_d):\n if l3cache_topo_d:\n return len(l3cache_topo_d[\"l3cache_ids\"])\n else:\n return 0\n\n\ndef calc_total_num_cores(topo_d):\n total_num_cores = 0\n for numnode_id in topo_d[\"numanode_ids\"]:\n core_ids_l = topo_d[\"numanode_ids\"][numnode_id][\"core_ids\"]\n c = len(core_ids_l)\n total_num_cores += len(core_ids_l)\n\n return total_num_cores\n\n\ndef calc_total_num_threads(process_d):\n total_num_threads = 0\n for pid in process_d[\"pids\"]:\n total_num_threads += process_d[\"pids\"][pid][\"running_threads\"]\n\n return total_num_threads\n\n\ndef check_total_threads(total_num_cores, total_num_threads):\n if total_num_threads > total_num_cores:\n print(\"Warning: Total number of threads ({}) is greater than total number of cores ({})\".format(total_num_threads, total_num_cores))\n\n\ndef check_size_core_map_domain(process_d):\n for pid in process_d[\"pids\"]:\n nrthreads = process_d[\"pids\"][pid][\"running_threads\"]\n nmcores = process_d[\"pids\"][pid][\"num_core_ids\"]\n if nrthreads > nmcores:\n print(\"Warning: {} threads are mapped to {} core(s), for pid ({})\".format(nrthreads,nmcores,pid))\n\n\ndef check_numa_per_process(process_d):\n for pid in process_d[\"pids\"]:\n if process_d[\"pids\"][pid][\"running_threads\"] > 0:\n numas = process_d[\"pids\"][pid][\"numas\"]\n if len(numas) > 1:\n print(\"Warning: pid ({} is mapped to more than one numa domain ({})\".format(pid,numas))\n\n\ndef check_process_numa_distribution(total_num_processes, total_num_numa_domains, process_d):\n num_numa_domains = min(total_num_processes, total_num_numa_domains)\n numas_l = []\n for pid in process_d[\"pids\"]:\n# numas = process_d[\"pids\"][pid][\"numas\"]\n for numa_id in process_d[\"pids\"][pid][\"numas\"]:\n if numa_id not in numas_l:\n numas_l.append(numa_id)\n len_numas_l = len(numas_l)\n if len_numas_l < num_numa_domains:\n print(\"Warning: {} processes are mapped to {} Numa domain(s), (but {} Numa domains exist)\".format(total_num_processes,len_numas_l,total_num_numa_domains))\n\n\ndef check_thread_to_gpu(num_threads, num_gpus):\n if num_gpus > 0:\n if num_threads < num_gpus:\n print(\"Warning: Virtual Machine has {} GPU's, but only {} threads are running\".format(num_gpus,num_threads))\n elif num_threads > num_gpus:\n print(\"Warning: Virtual Machine has only {} GPU's, but {} threads are running\".format(num_gpus,num_threads))\n\n\ndef find_l3cache_id(last_core_id, l3cache_topo_d):\n for l3cache_id in l3cache_topo_d[\"l3cache_ids\"]:\n if int(last_core_id) in l3cache_topo_d[\"l3cache_ids\"][l3cache_id]:\n return l3cache_id\n\n\ndef check_processes_to_l3cache(total_num_processes, total_num_l3caches, l3cache_topo_d, process_d):\n if l3cache_topo_d:\n num_l3caches = min(total_num_processes, total_num_l3caches)\n l3caches_l = []\n for pid in process_d[\"pids\"]:\n last_core_id = process_d[\"pids\"][pid][\"last_core_id\"]\n l3cache_id = find_l3cache_id(last_core_id,l3cache_topo_d)\n if l3cache_id not in l3caches_l:\n l3caches_l.append(l3cache_id)\n len_l3caches_l = len(l3caches_l)\n if len_l3caches_l < num_l3caches:\n print(\"Warning: {} processes are mapped to {} L3cache(s), (but {} L3caches exist)\".format(total_num_processes,len_l3caches_l,total_num_l3caches))\n\n\ndef not_in_l3cache(cpus_allowed_l, l3cache_topo_d):\n l3caches_l = []\n for l3cache_id in l3cache_topo_d[\"l3cache_ids\"]:\n for core_id in cpus_allowed_l:\n if core_id in 
l3cache_topo_d[\"l3cache_ids\"][l3cache_id]:\n if l3cache_id not in l3caches_l:\n l3caches_l.append(l3cache_id)\n if len(l3caches_l) > 1:\n cond = True\n else:\n cond = False\n return (cond,l3caches_l)\n \n\n\ndef check_threads_l3cache(total_num_processes, total_num_threads, l3cache_topo_d, process_d):\n if l3cache_topo_d:\n threads_per_process = total_num_threads / total_num_processes\n if threads_per_process > 1.0:\n for pid in process_d[\"pids\"]:\n cpus_allowed = process_d[\"pids\"][pid][\"cpus_allowed\"]\n cpus_allowed_l = range_to_list(cpus_allowed)\n (not_single_l3cache, l3caches_l) = not_in_l3cache(cpus_allowed_l, l3cache_topo_d)\n if not_single_l3cache:\n print(\"Warning: threads corresponding to process {} are mapped to multiple L3cache(s) ({})\".format(pid,l3caches_l))\n break\n\n\ndef check_app(topo_d, process_d, l3cache_topo_d):\n print(\"\")\n print(\"\")\n total_num_processes = calc_total_num_processes(process_d)\n total_num_numa_domains = calc_total_num_numas(topo_d)\n total_num_l3caches = calc_total_num_l3caches(l3cache_topo_d)\n total_num_cores = calc_total_num_cores(topo_d)\n total_num_threads = calc_total_num_threads(process_d)\n total_num_gpus = calc_total_num_gpus(topo_d)\n\n check_total_threads(total_num_cores, total_num_threads)\n check_size_core_map_domain(process_d)\n check_numa_per_process(process_d)\n check_process_numa_distribution(total_num_processes, total_num_numa_domains, process_d)\n check_thread_to_gpu(total_num_threads, total_num_gpus)\n check_processes_to_l3cache(total_num_processes, total_num_l3caches, l3cache_topo_d, process_d)\n check_threads_l3cache(total_num_processes, total_num_threads, l3cache_topo_d, process_d)\n\n\ndef range_to_list(range_str):\n range_str_l = range_str.split(\"-\")\n if len(range_str_l) == 2:\n return range(int(range_str_l[0]), int(range_str_l[1])+1)\n elif len(range_str_l) == 1:\n return list(map(int,range_str_l))\n else:\n print(\"Error: function range_to_list does not support {}\".format(range_str))\n\n\ndef ranges(i):\n for a, b in itertools.groupby(enumerate(i), lambda pair: pair[1] - pair[0]):\n b = list(b)\n yield b[0][1], b[-1][1]\n\n\ndef conv_ranges(range_l):\n range_str_l = []\n for item in range_l:\n if item[0] == item[1]:\n range_str = item[0]\n else:\n range_str = str(item[0]) + \"-\" + str(item[1])\n range_str_l.append(range_str)\n return range_str_l\n\n\ndef list_to_ranges(l):\n if len(l) == 1:\n return l\n else:\n range_l = list(ranges(l))\n range_str_l = conv_ranges(range_l)\n return range_str_l\n\n\ndef report(app_pattern, topo_d, process_d, sku_name, l3cache_topo_d):\n hostname = socket.gethostname()\n print(\"\")\n print(\"Virtual Machine ({}) Numa topology\".format(sku_name))\n print(\"\")\n print(\"{:<12} {:<20} {:<10}\".format(\"NumaNode id\",\"Core ids\", \"GPU ids\"))\n print(\"{:=<12} {:=<20} {:=<10}\".format(\"=\",\"=\", \"=\"))\n for numnode_id in topo_d[\"numanode_ids\"]:\n core_ids_l = str(list_to_ranges(topo_d[\"numanode_ids\"][numnode_id][\"core_ids\"]))\n gpu_ids_l = str(list_to_ranges(topo_d[\"numanode_ids\"][numnode_id][\"gpu_ids\"]))\n print(\"{:<12} {:<20} {:<10}\".format(numnode_id,core_ids_l, gpu_ids_l))\n print(\"\")\n if l3cache_topo_d:\n print(\"{:<12} {:<20}\".format(\"L3Cache id\",\"Core ids\"))\n print(\"{:=<12} {:=<20}\".format(\"=\",\"=\"))\n for l3cache_id in l3cache_topo_d[\"l3cache_ids\"]:\n core_ids_l = str(list_to_ranges(l3cache_topo_d[\"l3cache_ids\"][l3cache_id]))\n print(\"{:<12} {:<20}\".format(l3cache_id,core_ids_l))\n print(\"\")\n print(\"\")\n print(\"Application 
({}) Mapping/pinning\".format(app_pattern))\n print(\"\")\n print(\"{:<12} {:<17} {:<17} {:<15} {:<17} {:<15} {:<15}\".format(\"PID\",\"Threads\",\"Running Threads\",\"Last core id\",\"Core id mapping\",\"Numa Node ids\", \"GPU ids\"))\n print(\"{:=<12} {:=<17} {:=<17} {:=<15} {:=<17} {:=<15} {:=<15}\".format(\"=\",\"=\",\"=\",\"=\",\"=\",\"=\",\"=\"))\n for pid in process_d[\"pids\"]:\n threads = process_d[\"pids\"][pid][\"num_threads\"]\n running_threads = process_d[\"pids\"][pid][\"running_threads\"]\n last_core_id = process_d[\"pids\"][pid][\"last_core_id\"]\n cpus_allowed = process_d[\"pids\"][pid][\"cpus_allowed\"]\n numas = str(list_to_ranges(process_d[\"pids\"][pid][\"numas\"]))\n gpus = str(list_to_ranges(process_d[\"pids\"][pid][\"gpus\"]))\n print(\"{:<12} {:<17} {:<17} {:<15} {:<17} {:<15} {:<15}\".format(pid,threads,running_threads,last_core_id,cpus_allowed,numas,gpus))\n\n\ndef main():\n vm_metadata = get_vm_metadata()\n sku_name = vm_metadata[\"compute\"][\"vmSize\"]\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(dest=\"application_pattern\", type=str, default=\"None\", help=\"Select the application pattern to check [string]\")\n args = parser.parse_args()\n if args.application_pattern:\n app_pattern = args.application_pattern\n topo_d = parse_lstopo()\n l3cache_topo_d = create_l3cache_topo(sku_name)\n# print(l3cache_topo_d)\n pids_l = find_pids(app_pattern)\n process_d = find_threads(pids_l)\n find_process_numas(topo_d, process_d)\n find_process_gpus(topo_d, process_d)\n find_last_core_id(process_d)\n report(app_pattern, topo_d, process_d, sku_name, l3cache_topo_d)\n check_app(topo_d, process_d, l3cache_topo_d)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "experimental/check_app_pinning_tool/check_app_pinning.py", "file_name": "check_app_pinning.py", "file_ext": "py", "file_size_in_byte": 24514, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "urllib.request.Request", "line_number": 128, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 132, "usage_type": "call"}, {"api_name": "json.load", "line_number": 135, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 158, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 158, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 161, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 184, "usage_type": "call"}, {"api_name": "re.search", "line_number": 186, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 188, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 209, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 209, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 213, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 227, "usage_type": "call"}, {"api_name": "os.path", "line_number": 227, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path", "line_number": 259, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 300, "usage_type": "call"}, {"api_name": "os.path", "line_number": 300, "usage_type": 
"attribute"}, {"api_name": "itertools.groupby", "line_number": 470, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 496, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 532, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 532, "usage_type": "attribute"}]} +{"seq_id": "178106378", "text": "from copy import deepcopy\nimport curses\nimport random\nimport time\ns=curses.initscr()\ncurses.beep()\ncurses.beep()\n#sh, sw = s.getmaxyx()\nsh=25\nsw=sh\n\ncurses.noecho()\ncurses.curs_set(0)\n\n\nkey_list= [curses.KEY_RIGHT,curses.KEY_LEFT,curses.KEY_DOWN,curses.KEY_UP,curses.KEY_BACKSPACE]\n\nspeed = 100 #ms\nsnk_y=sh//2\nsnk_x=4\n\n\nclass game:\n\tdef __init__(self):\n\t\tself.snake = [\n\t\t\t[snk_y, snk_x],\n\t\t\t[snk_y, snk_x-1],\n\t\t\t[snk_y, snk_x-2]\n\t\t]\n\t\tself.food = [random.randint(1,sh-2),random.randint(1,sw-2)]\n\t\twhile self.food in self.snake:\n\t\t\t\tself.food = [random.randint(1,sh-2),random.randint(1,sw-2)]\n\t\tself.score = 0\n\t\tself.highscore = 0\n\t\tself.window = curses.newwin(sh,sw, 0, 0)\n\t\tself.window.timeout(speed)\n\t\tself.window.keypad(1)\n\t\tself.window.scrollok(0)\n\t\tself.key = curses.KEY_RIGHT\n\t\tself.n_frames=0\n\t\tself.findpath()\n\t\t\n\tdef frame(self):\n\t\tself.window.clear()\n\t\tself.window.border(0)\n\n\t\tif self.snake[0] in self.path:\n\t\t\tdel self.path[self.path.index(self.snake[0]):]\n\n\t\tself.window.addch(self.food[0],self.food[1],'+')\n\t\tself.window.addstr(0,0,str(self.score))\n\t\tself.window.addstr(0,sw-3,str(self.highscore))\n\t\tself.printsnake()\n\t\t\n\t\tnewkey = (self.window.getch())\n\t\tif newkey != -1 and newkey in key_list:\n\t\t\tif newkey == curses.KEY_BACKSPACE:\n\t\t\t\tcurses.endwin()\n\t\t\t\tquit()\n\t\t\tself.key = newkey\n\t\telse:\n\t\t\tself.key = self.getdirection()\n\n\n\t\tif self.snake [0] == self.food:\n\t\t\tself.food = [random.randint(1,sh-2),random.randint(1,sw-2)]\n\t\t\twhile self.food in self.snake:\n\t\t\t\tself.food = [random.randint(1,sh-2),random.randint(1,sw-2)]\n\t\t\tself.window.addch(self.food[0], self.food[1],'+')\n\t\t\tself.findpath()\n\t\t\tself.key = self.getdirection()\n\t\t\tself.score += 1\n\t\t\tself.bewegen()\n\t\telse:\n\t\t\tself.snake.pop()\n\t\t\tself.bewegen()\n\t\tself.checkbreak()\n\t\tself.n_frames+=1\n\t\t\n\tdef rungame(self):\n\t\twhile True:\n\t\t\tself.frame()\n\n\tdef endgame(self):\n\t\tself.snake = [\n\t\t\t[snk_y, snk_x],\n\t\t\t[snk_y, snk_x-1],\n\t\t\t[snk_y, snk_x-2]\n\t\t]\n\t\tself.food = [random.randint(1,sh-2),random.randint(1,sw-2)]\n\t\twhile self.food in self.snake:\n\t\t\tself.food = [random.randint(1,sh-2),random.randint(1,sw-2)]\n\t\tself.findpath()\n\t\tif self.score > self.highscore:\n\t\t\tself.highscore = self.score\n\t\tself.score = 0\n\t\tself.window = curses.newwin(sh,sw, 0, 0)\n\t\tself.window.timeout(speed)\n\t\tself.window.keypad(1)\n\t\tself.key = curses.KEY_RIGHT\n\t\tself.n_frames=0\n\n\tdef printsnake(self):\n\t\tfor i in range(len(self.snake)):\n\t\t\tself.window.addch(self.snake[i][0],self.snake[i][1],'#')\n\n\tdef bewegen(self):\n\t\thead=self.snake[0]\n\t\tif self.key == curses.KEY_DOWN:\n\t\t\tself.snake.insert(0, [head[0]+1,head[1]])\n\t\telif self.key == curses.KEY_UP:\n\t\t\tself.snake.insert(0, [head[0]-1,head[1]])\n\t\telif self.key == curses.KEY_RIGHT:\n\t\t\tself.snake.insert(0, [head[0],head[1]+1])\n\t\telif self.key == curses.KEY_LEFT:\n\t\t\tself.snake.insert(0, [head[0],head[1]-1])\n\t\t\t\n\tdef 
checkbreak(self):\n\t\tend = False\n\t\tif self.snake[0] [0] == 0 or self.snake[0][0]== sh-1 or self.snake[0] [1] == 0 or self.snake[0][1]== sw-1:\n\t\t\tend = True\n\t\tif self.snake [0] in self.snake[1:]:\n\t\t\tend = True\n\t\tif end:\n\t\t\tself.window.addch(self.food[0],self.food[1],'+')\n\t\t\ttime.sleep(0.5)\n\t\t\tself.endgame()\n\n\tdef findpath(self):\n\t\tself.options = [[self.snake[0]]]\n\t\tself.checked = [self.snake[0]]\n\n\t\tself.find(self.snake[0],[self.snake[0]])\n\n\t\tself.path = self.options[0]\n\t\tself.options.remove([self.snake[0]])\n\n\t\tfor i in range(len(self.options)):\n\t\t\tusable = True\n\n\t\t\tfor j in range(len(self.options[i])):\n\t\t\t\tif self.options[i][j] in self.snake:\n\t\t\t\t\tusable = False\n\t\t\tif len(self.path) > len(self.options[i]) and usable == True:\n\t\t\t\tself.path = self.options[i]\n\t\t\t\n\tdef find(self,coursor,weg):\n\t\tif coursor == self.food:\n\t\t\tif self.snake[0] in weg:\n\t\t\t\tweg.remove(self.snake[0])\n\t\t\t#if self.food in weg:\n\t\t\t#\tweg.remove(self.food)\n\t\t\ttemp = deepcopy(weg)\n\t\t\tself.options.insert(0,temp)\n\t\telse:\n\t\t\tif coursor[0]+1<sh-1 and [coursor[0]+1,coursor[1]] not in self.snake and [coursor[0]+1,coursor[1]] not in weg:\n\t\t\t\t\tweg.insert(0,[coursor[0]+1,coursor[1]])\n\t\t\t\t\tself.find([coursor[0]+1,coursor[1]],weg)\n\t\t\t\n\t\t\tif coursor[0]-1>0 and [coursor[0]-1,coursor[1]] not in self.snake and [coursor[0]-1,coursor[1]] not in weg:\n\t\t\t\t\tweg.insert(0,[coursor[0]-1,coursor[1]])\n\t\t\t\t\tself.find([coursor[0]-1,coursor[1]],weg)\n\t\t\t\n\t\t\tif coursor[1]+1<sw-1 and [coursor[0],coursor[1]+1] not in self.snake and [coursor[0],coursor[1]+1] not in weg:\n\t\t\t\t\tweg.insert(0,[coursor[0],coursor[1]+1])\n\t\t\t\t\tself.find([coursor[0],coursor[1]+1],weg)\n\t\t\t\n\t\t\tif coursor[1]-1>0 and [coursor[0],coursor[1] -1] not in self.snake and [coursor[0],coursor[1]-1] not in weg:\n\t\t\t\t\tweg.insert(0,[coursor[0],coursor[1]-1])\n\t\t\t\t\tself.find([coursor[0],coursor[1]-1],weg)\t\n\n\tdef getdirection(self):\n\t\tself.printpath()\n\t\tif self.snake[0] != self.food:\n\t\t\tif self.snake[0] in self.path:\n\t\t\t\ti = self.path.index(self.snake[0])\n\t\t\t\tif self.path[i+1][0] > self.snake[0][0]:\n\t\t\t\t\treturn curses.KEY_DOWN\n\t\t\t\telif self.path[i+1][0] < self.snake[0][0]:\n\t\t\t\t\treturn curses.KEY_UP\n\t\t\t\telif self.path[i+1][1] > self.snake[0][1]:\n\t\t\t\t\treturn curses.KEY_RIGHT\n\t\t\t\telif self.path[i+1][1] < self.snake[0][1]:\n\t\t\t\t\treturn curses.KEY_LEFT\n\t\t\telse:\n\n\t\t\t\tif self.path[0][0] > self.snake[0][0]:\n\t\t\t\t\treturn curses.KEY_DOWN\n\t\t\t\telif self.path[0][0] < self.snake[0][0]:\n\t\t\t\t\treturn curses.KEY_UP\n\t\t\t\telif self.path[0][1] > self.snake[0][1]:\n\t\t\t\t\treturn curses.KEY_RIGHT\n\t\t\t\telif self.path[0][1] < self.snake[0][1]:\n\t\t\t\t\treturn curses.KEY_LEFT\n\t\telse:\n\t\t\treturn self.key\t\t\t\n\n\tdef printpath(self):\n\t\tfor i in range(len(self.path)):\n\t\t\tself.window.addch(self.path[i][0],self.path[i][1],'x')\n\n\ns = game()\n\ns.rungame()\n", "sub_path": "selfsolvingsnake_bad.py", "file_name": "selfsolvingsnake_bad.py", "file_ext": "py", "file_size_in_byte": 5563, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "curses.initscr", "line_number": 5, "usage_type": "call"}, {"api_name": "curses.beep", "line_number": 6, "usage_type": "call"}, {"api_name": "curses.beep", "line_number": 7, "usage_type": "call"}, {"api_name": "curses.noecho", "line_number": 12, "usage_type": "call"}, {"api_name": "curses.curs_set", "line_number": 13, "usage_type": "call"}, {"api_name": "curses.KEY_RIGHT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "curses.KEY_LEFT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "curses.KEY_DOWN", "line_number": 16, "usage_type": "attribute"}, {"api_name": "curses.KEY_UP", "line_number": 16, "usage_type": "attribute"}, {"api_name": 
"curses.KEY_BACKSPACE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 30, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "curses.newwin", "line_number": 35, "usage_type": "call"}, {"api_name": "curses.KEY_RIGHT", "line_number": 39, "usage_type": "attribute"}, {"api_name": "curses.KEY_BACKSPACE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "curses.endwin", "line_number": 58, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 66, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 68, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 90, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 92, "usage_type": "call"}, {"api_name": "curses.newwin", "line_number": 97, "usage_type": "call"}, {"api_name": "curses.KEY_RIGHT", "line_number": 100, "usage_type": "attribute"}, {"api_name": "curses.KEY_DOWN", "line_number": 109, "usage_type": "attribute"}, {"api_name": "curses.KEY_UP", "line_number": 111, "usage_type": "attribute"}, {"api_name": "curses.KEY_RIGHT", "line_number": 113, "usage_type": "attribute"}, {"api_name": "curses.KEY_LEFT", "line_number": 115, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 126, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 153, "usage_type": "call"}, {"api_name": "curses.KEY_DOWN", "line_number": 178, "usage_type": "attribute"}, {"api_name": "curses.KEY_UP", "line_number": 180, "usage_type": "attribute"}, {"api_name": "curses.KEY_RIGHT", "line_number": 182, "usage_type": "attribute"}, {"api_name": "curses.KEY_LEFT", "line_number": 184, "usage_type": "attribute"}, {"api_name": "curses.KEY_DOWN", "line_number": 188, "usage_type": "attribute"}, {"api_name": "curses.KEY_UP", "line_number": 190, "usage_type": "attribute"}, {"api_name": "curses.KEY_RIGHT", "line_number": 192, "usage_type": "attribute"}, {"api_name": "curses.KEY_LEFT", "line_number": 194, "usage_type": "attribute"}]} +{"seq_id": "502666400", "text": "# -*- coding: utf-8 -*-\nimport logging\nimport tkinter\nimport turtle\nfrom typing import Dict\n\nlogger = logging.getLogger(__name__)\n\n\nclass Arm:\n def __init__(self, length: int, child_arm=None):\n self.length = length\n self.child_arm = child_arm\n\n def draw(self, kame: turtle.RawTurtle, degree, color='black'):\n mag = PigpioGUI.mag\n kame.setheading(270)\n kame.lt(degree)\n kame.color(color)\n kame.fd(self.length * mag)\n if self.child_arm:\n kame.setheading(270)\n kame.fd(self.child_arm.length * mag)\n\n\nclass Leg:\n\n FACE_FRONT = 1\n FACE_BACK = -1\n\n def __init__(self, height, arm_pos, arm: Arm, face, pos=(0, 0)):\n self.height = height\n self.arm_pos = arm_pos\n self.arm = arm\n self.pulsewidth = 500\n if face < 0:\n self.face = -1\n else:\n self.face = 1\n self.pos = pos\n\n def setup(self, screen, arm_color='black'):\n self.screen = screen\n self.kame = turtle.RawTurtle(self.screen, visible=False)\n self.arm_color = arm_color\n self.draw_leg()\n self.degree = 0\n\n def draw_leg(self):\n mag = PigpioGUI.mag\n self.kame.reset()\n self.kame.width(8)\n self.kame.ht()\n self.kame.speed(0)\n self.kame.up()\n self.kame.setpos(self.pos[0], self.pos[1] - 80)\n self.kame.down()\n self.kame.lt(90)\n self.kame.fd(self.height * mag)\n self.kame.bk((self.height - self.arm_pos) * mag)\n self.screen.update()\n\n def draw_arm(self, degree):\n self.arm.draw(self.kame, degree * self.face, self.arm_color)\n\n def 
set_servo_pulsewidth(self, pulsewidth):\n if self.pulsewidth == pulsewidth:\n return\n self.pulsewidth = pulsewidth\n degree = round(pulsewidth - 500) / 11.11111\n self.draw_leg()\n self.draw_arm(degree)\n self.screen.update()\n\n\nclass PigpioGUI:\n\n mag = 10\n _colors = [\n 'red', 'orange', 'spring green', 'blue', 'purple', 'RosyBrown',\n 'sea green', 'turquoise', 'maroon', 'firebrick', 'cyan', 'coral',\n 'DarkGoldenrod', 'plum4', 'tan4'\n ]\n\n def __init__(self, screen: turtle.TurtleScreen, legs: Dict[int, Leg]):\n self.legs = legs\n for gpio, leg in legs.items():\n leg.setup(screen, self._colors[gpio])\n\n def set_mode(self, gpio: int, mode: int):\n logger.debug('set_mode: %s %s' % (gpio, mode))\n\n def set_servo_pulsewidth(self, gpio: int, pulsewidth: int):\n leg = self.legs[gpio]\n leg.set_servo_pulsewidth(pulsewidth)\n\n def stop(self):\n pass\n\n\nclass PigpioGUIProvider:\n\n screen = None\n\n OUTPUT = 1\n\n def __init__(self, screen):\n self.screen = screen\n\n def buildLegs(self):\n pos = (0, 0)\n return {\n # LEFT\n 2: Leg(10, 8, Arm(5), Leg.FACE_FRONT, pos),\n 3: Leg(10, 8, Arm(5), Leg.FACE_FRONT, pos),\n 4: Leg(10, 8, Arm(5), Leg.FACE_FRONT, pos),\n 5: Leg(10, 8, Arm(5, Arm(3)), Leg.FACE_BACK, pos),\n\n # RIGHT\n 6: Leg(10, 8, Arm(5), Leg.FACE_BACK, pos),\n 7: Leg(10, 8, Arm(5), Leg.FACE_FRONT, pos),\n 8: Leg(10, 8, Arm(5), Leg.FACE_BACK, pos),\n 9: Leg(10, 8, Arm(5, Arm(3)), Leg.FACE_BACK, pos),\n\n # 10: Leg(10, 8, Arm(5), Leg.FACE_FRONT, pos),\n # 11: Leg(10, 8, Arm(5), Leg.FACE_BACK, pos),\n }\n\n def pi(self):\n return PigpioGUI(self.screen, self.buildLegs())\n\n\nif __name__ == '__main__':\n import pigpio_provider\n\n root = tkinter.Tk()\n canvas = tkinter.Canvas(root)\n screen = turtle.TurtleScreen(canvas)\n screen.tracer(0, 0)\n\n stub = pigpio_provider.pi('GUI', screen)\n\n canvas.pack()\n\n root.after(200, stub.set_servo_pulsewidth, 2, 750)\n root.after(400, stub.set_servo_pulsewidth, 3, 1500)\n root.after(600, stub.set_servo_pulsewidth, 4, 500)\n root.after(800, stub.set_servo_pulsewidth, 2, 900)\n root.after(1000, stub.set_servo_pulsewidth, 6, 900)\n root.after(1200, stub.set_servo_pulsewidth, 7, 900)\n\n root.mainloop()", "sub_path": "pigpio_gui.py", "file_name": "pigpio_gui.py", "file_ext": "py", "file_size_in_byte": 4144, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "turtle.RawTurtle", "line_number": 15, "usage_type": "attribute"}, {"api_name": "turtle.RawTurtle", "line_number": 44, "usage_type": "call"}, {"api_name": "turtle.TurtleScreen", "line_number": 85, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 85, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 136, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 137, "usage_type": "call"}, {"api_name": "turtle.TurtleScreen", "line_number": 138, "usage_type": "call"}, {"api_name": "pigpio_provider.pi", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "453489431", "text": "\"\"\" Models the distribution of in-sample Sharpe ratios realized by authors. 
\"\"\"\n\nimport random\nimport warnings\nimport json\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.covariance import LedoitWolf\nimport pymc3 as pm\nimport theano.tensor as tt\nimport xarray as xr\nfrom .serialize import to_xarray\nfrom ._version import get_versions\nfrom .base import BayesAlphaResult\n\nAUTHOR_MODEL_TYPE = 'author-model'\nAPPROX_BDAYS_PER_YEAR = 252\n\n\nclass AuthorModelBuilder(object):\n \"\"\" Class to build the author model. \"\"\"\n\n def __init__(self, sharpes, returns):\n \"\"\"\n Initialize AuthorModelBuilder object.\n\n Parameters\n ----------\n sharpes : pd.DataFrame\n Long-format DataFrame of in-sample Sharpe ratios (from user-run\n backtests), indexed by user, algorithm and code ID.\n Note that currently, backtests are deduplicated based on code id.\n See fit_authors for more information.\n \"\"\"\n self.num_authors = sharpes.meta_user_id.nunique()\n self.num_algos = sharpes.meta_algorithm_id.nunique()\n # For num_backtests, nunique() and count() should be the same\n self.num_backtests = sharpes.meta_code_id.nunique()\n\n # Which algos correspond to which authors?\n df = (sharpes.loc[:, ['meta_user_id', 'meta_algorithm_id']]\n .drop_duplicates(subset='meta_algorithm_id', keep='first')\n .reset_index()\n .meta_user_id\n .astype(str))\n self.author_to_algo_encoding = LabelEncoder().fit_transform(df)\n\n # Which backtests correspond to which algos?\n df = sharpes.meta_algorithm_id.astype(str)\n self.algo_to_backtest_encoding = LabelEncoder().fit_transform(df)\n\n # Which backtests correspond to which authors?\n df = sharpes.meta_user_id.astype(str)\n self.author_to_backtest_encoding = LabelEncoder().fit_transform(df)\n\n # Construct correlation matrix.\n # 0 is a better estimate for mean returns than the sample mean!\n returns_ = returns / returns.std()\n self.corr = LedoitWolf(assume_centered=True).fit(returns_).covariance_\n\n self.model = self._build_model(sharpes, self.corr)\n\n self.coords = {\n 'meta_user_id': sharpes.meta_user_id.drop_duplicates().values,\n 'meta_algorithm_id': sharpes.meta_algorithm_id.drop_duplicates().values,\n 'meta_code_id': sharpes.meta_code_id.values\n }\n\n self.dims = {\n 'mu_global': (),\n 'mu_author': ('meta_user_id', ),\n 'mu_author_raw': ('meta_user_id', ),\n 'mu_author_sd': (),\n 'mu_algo': ('meta_algorithm_id', ),\n 'mu_algo_raw': ('meta_algorithm_id', ),\n 'mu_algo_sd': (),\n 'mu_backtest': ('meta_code_id', ),\n 'sigma_backtest': ('meta_code_id', ),\n 'alpha_author': ('meta_user_id', ),\n 'alpha_algo': ('meta_algorithm_id', )\n }\n\n def _build_model(self, sharpes, corr):\n \"\"\"\n Build the entire author model (in one function). 
The model is\n sufficiently simple to specify entirely in one function.\n\n Parameters\n ----------\n sharpes : pd.DataFrame\n Long-format DataFrame of in-sample Sharpe ratios (from user-run\n backtests), indexed by user, algorithm and code ID.\n Note that currently, backtests are deduplicated based on code id.\n See fit_authors for more information.\n corr : np.ndarray\n Correlation matrix of returns streams (from backtests), estimated\n using Ledoit-Wolf shrinkage.\n See fit_authors for more information.\n \"\"\"\n with pm.Model() as model:\n mu_global = pm.Normal('mu_global', mu=0, sd=3)\n\n mu_author_sd = pm.HalfNormal('mu_author_sd', sd=1)\n mu_author_raw = pm.Normal('mu_author_raw', mu=0, sd=1,\n shape=self.num_authors)\n mu_author = pm.Deterministic('mu_author',\n mu_author_sd * mu_author_raw)\n\n mu_algo_sd = pm.HalfNormal('mu_algo_sd', sd=1)\n mu_algo_raw = pm.Normal('mu_algo_raw', mu=0, sd=1,\n shape=self.num_algos)\n mu_algo = pm.Deterministic('mu_algo', mu_algo_sd * mu_algo_raw)\n\n mu_backtest = \\\n pm.Deterministic('mu_backtest',\n mu_global\n + mu_author[self.author_to_backtest_encoding]\n + mu_algo[self.algo_to_backtest_encoding])\n\n sigma_backtest = pm.Deterministic(\n 'sigma_backtest',\n tt.sqrt(APPROX_BDAYS_PER_YEAR / sharpes.meta_trading_days)\n )\n\n cov = corr * sigma_backtest[:, None] * sigma_backtest[None, :]\n\n alpha_author = pm.Deterministic('alpha_author',\n mu_global + mu_author)\n\n alpha_algo = \\\n pm.Deterministic('alpha_algo',\n mu_global\n + mu_author[self.author_to_algo_encoding]\n + mu_algo)\n\n sharpe = pm.MvNormal('sharpe',\n mu=mu_backtest,\n cov=cov,\n shape=self.num_backtests,\n observed=sharpes.sharpe_ratio)\n\n return model\n\n\nclass AuthorModelResult(BayesAlphaResult):\n def rebuild_model(self, sharpes=None, returns=None):\n \"\"\" Return an AuthorModelBuilder that recreates the original model. \"\"\"\n if sharpes is None:\n sharpes = (self.trace\n ._sharpes\n .to_pandas()\n .reset_index()\n .copy())\n\n if returns is None:\n returns = (self.trace\n ._returns\n .to_pandas()\n .copy())\n\n return AuthorModelBuilder(sharpes, returns)\n\n\ndef fit_authors(sharpes,\n returns,\n sampler_type='mcmc',\n sampler_args=None,\n seed=None,\n save_data=True,\n **params):\n \"\"\"\n Fit author model to population of authors, with algos and backtests.\n\n Parameters\n ----------\n sharpes : pd.DataFrame\n Long-format DataFrame of in-sample Sharpe ratios (from user-run\n backtests), indexed by user, algorithm and code ID.\n Note that currently, backtests are deduplicated based on code id.\n ::\n meta_user_id meta_algorithm_id meta_code_id meta_trading_days sharpe_ratio\n 0 abcdef123456 ghijkl789123 abcdef000000 136 0.919407\n 1 abcdef123456 ghijkl789123 abcdef000001 271 1.129353\n 2 abcdef123456 ghijkl789123 abcdef000002 229 -0.005934\n\n returns : pd.DataFrame\n Wide-format DataFrame of in-sample returns of user-run backtests,\n indexed by time. Columns are code ids, rows are time (the format of\n time does not matter).\n ::\n abcd1234 efgh5678 ijkl9123\n 2013-06-03 -0.000326 0.002815 0.002110\n 2013-06-04 0.000326 -0.000135 -0.001211\n 2013-06-05 0.000326 0.001918 0.002911\n\n sampler_type : str\n Whether to use Markov chain Monte Carlo or variational inference.\n Either 'mcmc' or 'vi'. 
Defaults to 'mcmc'.\n sampler_args : dict\n Additional parameters for `pm.sample`.\n save_data : bool\n Whether to store the dataset in the result object.\n seed : int\n Seed for random number generation in PyMC3.\n \"\"\"\n if params:\n raise ValueError('Unnecessary kwargs passed to fit_authors.')\n\n if sampler_type not in {'mcmc', 'vi'}:\n raise ValueError(\"sampler_type not in {'mcmc', 'vi'}\")\n\n # Check data\n _check_data(sharpes, returns)\n\n if seed is None:\n seed = int(random.getrandbits(31))\n else:\n seed = int(seed)\n\n builder = AuthorModelBuilder(sharpes, returns)\n model, coords, dims = builder.model, builder.coords, builder.dims\n\n timestamp = datetime.isoformat(datetime.now())\n\n with model:\n args = {} if sampler_args is None else sampler_args\n\n with warnings.catch_warnings(record=True) as warns:\n if sampler_type == 'mcmc':\n trace = pm.sample(**args)\n else:\n trace = pm.fit(**args).sample(args.get('draws', 500))\n\n if warns:\n warnings.warn('Problems during sampling. Inspect `result.warnings`.')\n\n trace = to_xarray(trace, coords, dims)\n # Author model takes no parameters, so this will always be empty.\n trace.attrs['params'] = json.dumps(params)\n trace.attrs['timestamp'] = timestamp\n trace.attrs['warnings'] = json.dumps([str(warn) for warn in warns])\n trace.attrs['seed'] = seed\n trace.attrs['model-version'] = get_versions()['version']\n trace.attrs['model-type'] = AUTHOR_MODEL_TYPE\n\n if save_data:\n # Store the data in long format to avoid creating more dimensions\n trace['_sharpes'] = xr.DataArray(sharpes, dims=['sharpes_index',\n 'sharpes_columns'])\n trace['_returns'] = xr.DataArray(returns, dims=['returns_index',\n 'returns_columns'])\n\n return AuthorModelResult(trace)\n\n\ndef _check_data(sharpes, returns):\n \"\"\"\n Run basic sanity checks on the data set.\n\n Parameters\n ----------\n sharpes : pd.DataFrame\n Long-format DataFrame of in-sample Sharpe ratios (from user-run\n backtests), indexed by user, algorithm and code ID.\n Note that currently, backtests are deduplicated based on code id.\n See fit_authors for more information.\n returns : pd.DataFrame\n Wide-format DataFrame of in-sample returns of user-run backtests,\n indexed by time. Columns are code ids, rows are time (the format of\n time does not matter).\n See fit_authors for more information.\n \"\"\"\n\n # FIXME deduplicating based on code id is not perfect. 
Ideally we would\n # deduplicate on backtest id.\n if sharpes.meta_code_id.nunique() != sharpes.shape[0]:\n warnings.warn('Data set contains duplicate backtests.')\n\n if (sharpes.groupby('meta_algorithm_id')['sharpe_ratio']\n .count() < 5).any():\n warnings.warn('Data set contains algorithms with fewer than 5 '\n 'backtests.')\n\n if (sharpes.groupby('meta_user_id')['meta_algorithm_id'].nunique() < 5).any():\n warnings.warn('Data set contains users with fewer than 5 algorithms.')\n\n if ((sharpes.sharpe_ratio > 20)\n | (sharpes.sharpe_ratio < -20)).any():\n raise ValueError('`sharpes` contains unrealistic values: greater than '\n '20 in magnitude.')\n\n if pd.isnull(sharpes).any().any():\n raise ValueError('`sharpes` contains NaNs.')\n\n # FIXME remove this check once all feature factory features are debugged.\n if (sharpes == -99999).any().any():\n raise ValueError('`sharpes` contains -99999s.')\n\n if pd.isnull(returns).any().any():\n raise ValueError('`returns` contains NaNs.')\n\n if returns.columns.duplicated().any():\n raise ValueError('`returns` contains duplicated code ids.')\n\n if len(sharpes.meta_code_id) != len(returns.columns):\n raise ValueError('`sharpes` and `returns` are different lengths.')\n\n if not set(sharpes.meta_code_id) == set(returns.columns):\n raise ValueError('`sharpes` and `returns` are the same length, but '\n 'contain different code ids.')\n\n if not (sharpes.meta_code_id == returns.columns).all():\n raise ValueError('`sharpes` and `returns` contain the same code ids, '\n 'but are ordered differently.')\n", "sub_path": "bayesalpha/author_model.py", "file_name": "author_model.py", "file_ext": "py", "file_size_in_byte": 12234, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 57, "usage_type": "call"}, {"api_name": "sklearn.covariance.LedoitWolf", "line_number": 62, "usage_type": "call"}, {"api_name": "pymc3.Model", "line_number": 103, "usage_type": "call"}, {"api_name": "pymc3.Normal", "line_number": 104, "usage_type": "call"}, {"api_name": "pymc3.HalfNormal", "line_number": 106, "usage_type": "call"}, {"api_name": "pymc3.Normal", "line_number": 107, "usage_type": "call"}, {"api_name": "pymc3.Deterministic", "line_number": 109, "usage_type": "call"}, {"api_name": "pymc3.HalfNormal", "line_number": 112, "usage_type": "call"}, {"api_name": "pymc3.Normal", "line_number": 113, "usage_type": "call"}, {"api_name": "pymc3.Deterministic", "line_number": 115, "usage_type": "call"}, {"api_name": "pymc3.Deterministic", "line_number": 118, "usage_type": "call"}, {"api_name": "pymc3.Deterministic", "line_number": 123, "usage_type": "call"}, {"api_name": "theano.tensor.sqrt", "line_number": 125, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 125, "usage_type": "name"}, {"api_name": "pymc3.Deterministic", "line_number": 130, "usage_type": "call"}, {"api_name": "pymc3.Deterministic", "line_number": 134, "usage_type": "call"}, {"api_name": "pymc3.MvNormal", "line_number": 139, "usage_type": "call"}, {"api_name": "base.BayesAlphaResult", "line_number": 148, "usage_type": "name"}, {"api_name": "random.getrandbits", "line_number": 219, "usage_type": "call"}, {"api_name": "datetime.datetime.isoformat", "line_number": 226, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 226, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 226, "usage_type": "call"}, {"api_name": "warnings.catch_warnings", "line_number": 231, "usage_type": "call"}, {"api_name": "pymc3.sample", "line_number": 233, "usage_type": "call"}, {"api_name": "pymc3.fit", "line_number": 235, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 238, "usage_type": "call"}, {"api_name": "serialize.to_xarray", "line_number": 240, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 242, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 244, "usage_type": "call"}, {"api_name": "_version.get_versions", "line_number": 246, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 251, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 253, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 280, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 284, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 288, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 295, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 302, "usage_type": "call"}]} +{"seq_id": "260148257", "text": "# import schedule \nimport threading\nimport time\nfrom .models import JobCategory, WorkDetails\nimport requests\nimport os\nfrom bs4 import BeautifulSoup\n\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}\n\n\ndef weworkSrcipe():\n url = \"https://weworkremotely.com/remote-jobs/search?term=remote\"\n html = requests.get(url, headers=headers).text\n soup = BeautifulSoup(html, 'lxml')\n section = soup.find_all(\"section\", {\"class\": \"jobs\"})\n\n for item in section:\n a = item.select(\"li > a\")\n print(a)\n\n for item2 in a:\n if str(item2.parent['class']) != \"['view-all']\":\n link = f\"https://weworkremotely.com{item2.get('href')}\"\n htmll = requests.get(link, headers=headers).text\n soupp = BeautifulSoup(htmll, 'lxml')\n des = soupp.find(\"div\", attrs={\"class\": \"listing-container\"})\n\n\n desc = str(des)\n # print(desc)\n # p = des.find_all(\"div\")\n # paragraphs = []\n #\n # for x in p:\n # paragraphs.append(str(x))\n #\n # print(paragraphs)\n # stri = \"\"\n # for pa in paragraphs:\n # stri += pa\n # f = open('templates/created.html', 'w')\n #\n # f.write(stri)\n # f.close()\n try:\n company_logo = soupp.find_all('div', {\"class\": \"listing-logo\"})[0].find('img', src=True)\n except IndexError:\n continue\n posted_on = soupp.find('div', {'class': 'listing-header-container'})\n try:\n posted_on = posted_on.find('h3').get_text()\n except:\n posted_on = ''\n apply_link_div = soupp.find_all('div', {'class': 'apply_tooltip'})[0].find('a', href=True)\n apply_links = apply_link_div['href'] # HERE IS THE APPLYABLE LINK #############\n company = item2.find(\"span\", {\"class\": \"company\"})\n title = item2.find(\"span\", {\"class\": \"title\"})\n try:\n locat = item2.find(\"span\", {\"class\": \"region company\"}).text\n except:\n locat=''\n print(str(locat))\n else:\n continue\n try:\n category = JobCategory.objects.all().first()\n except:\n category = JobCategory(name='Recent')\n category.save()\n job = WorkDetails(category=category, job_title=title.get_text(), posted_on=posted_on,\n job_desc=desc, apply_job_link=apply_links, 
company_name=company.get_text(),\n is_scraped_data=True, company_img_url=company_logo['src'],location=locat, verify_link=link\n )\n\n j = WorkDetails.objects.all()\n if (job not in j):\n job.save()\n else:\n pass\n \n\n\ndef start_stackoverflow_scrapes():\n title_texts = []\n\n descriptions = []\n jobs = []\n company_namess = []\n company_img = []\n\n url = f\"https://stackoverflow.com/jobs?r=true\"\n\n html = requests.get(url, headers=headers).text\n soup = BeautifulSoup(html, features='html.parser')\n\n pages = soup.find(\"div\", {\"class\": \"s-pagination\"}).find_all('a', {\"class\": \"s-pagination--item\"})\n pages.pop()\n # print(pages)\n for page in pages:\n print(page.text)\n if page.text != 'nextchevron_right':\n page_url = page['href']\n link1 = f\"https://stackoverflow.com{page_url}\"\n htmls = requests.get(link1, headers=headers).text\n soups = BeautifulSoup(htmls, features='html.parser')\n\n items = soups.find_all(\"a\", {\"class\": \"s-link stretched-link\"})\n print(len(items))\n for item in items:\n title_text = item.get_text()\n link2 = f\"https://stackoverflow.com{item.get('href')}\"\n\n htmll = requests.get(link2, headers=headers).text\n soupp = BeautifulSoup(htmll, features='html.parser')\n company_names = soupp.find('div', {'class': \"fc-black-700\"}).find('a')\n # print(company_names)\n des = soupp.find(\"section\", {\"class\": \"mb32 fs-body2 fc-medium\"})\n print(des)\n desc = str(des)\n time_of_post = soupp.find('ul', {\n \"class\": \"horizontal-list horizontal-list__lg fs-body1 fc-black-500 ai-baseline mb24\"})\n\n try:\n posted_on = time_of_post.find('li').get_text()\n except:\n posted_on = ''\n try:\n locat = soupp.find(\"span\", {\"class\": \"fc-black-500\"}).text\n\n except:\n locat=''\n apply_link = soupp.find_all('div', {'class': \"js-apply-container\"})[1].find('a', href=True)\n try:\n the_apply_link = apply_link['href'] # HERE IS THE APPLICABLE LINKS\n except:\n continue\n company_logo = soupp.find('div', {'class': 'grid--cell fl-shrink0'}).find('img', src=True)\n try:\n category = JobCategory.objects.all().first()\n except:\n category = JobCategory(name='Recent')\n category.save()\n\n job = WorkDetails(\n category=category, job_title=title_text, posted_on=posted_on, job_desc=desc,\n apply_job_link=the_apply_link, company_name=company_names.text, is_scraped_data=True,\n company_img_url=company_logo['src'],location=locat,verify_link=link2\n )\n j = WorkDetails.objects.all()\n if (job not in j):\n job.save()\n else:\n pass\n#\n#\n# def joson_response():\n# remoteok = 'https://remoteok.io/api'\n# result2 = requests.get(remoteok, headers=headers)\n# responses = result2.json()\n# try:\n# category = JobCategory.objects.all().first()\n# except:\n# category = JobCategory(name='Recent')\n# category.save()\n# for response in responses:\n# try:\n# job = WorkDetails(category=category, job_title=response['position'], posted_on=response['date'],\n# job_desc=response['description'], apply_job_link=response['url'],\n# company_name=response['company'], is_scraped_data=True,\n# company_img_url=response['company_logo']\n# )\n# job.save()\n# except:\n# pass\n\n\ndef Command():\n print('Called')\n WorkDetails.objects.filter(is_scraped_data=True).delete()\n start_stackoverflow_scrapes()\n weworkSrcipe()\n \n # joson_response()\n\n\ndef run_continuously(self, interval=1200):\n \"\"\"Continuously run, while executing pending jobs at each elapsed\n time interval.\n @return cease_continuous_run: threading.Event which can be set to\n cease continuous run.\n Please note that it is 
*intended behavior that run_continuously()\n does not run missed jobs*. For example, if you've registered a job\n that should run every minute and you set a continuous run interval\n of one hour then your job won't be run 60 times at each interval but\n only once.\n \"\"\"\n\n cease_continuous_run = threading.Event()\n\n class ScheduleThread(threading.Thread):\n\n @classmethod\n def run(cls):\n while not cease_continuous_run.is_set():\n self.run_pending()\n time.sleep(interval)\n\n continuous_thread = ScheduleThread()\n continuous_thread.setDaemon(True)\n continuous_thread.start()\n return cease_continuous_run\n\n\nBackgroundScheduler.run_continuously = run_continuously\n\n\ndef start_scheduler():\n \n scheduler = BackgroundScheduler()\n \n scheduler.add_job(Command, 'interval', hours = 12)\n scheduler.start()\n \n\n", "sub_path": "core/jobs.py", "file_name": "jobs.py", "file_ext": "py", "file_size_in_byte": 8363, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 29, "usage_type": "call"}, {"api_name": "models.JobCategory.objects.all", "line_number": 70, "usage_type": "call"}, {"api_name": "models.JobCategory.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "models.JobCategory", "line_number": 70, "usage_type": "name"}, {"api_name": "models.JobCategory", "line_number": 72, "usage_type": "call"}, {"api_name": "models.WorkDetails", "line_number": 74, "usage_type": "call"}, {"api_name": "models.WorkDetails.objects.all", "line_number": 79, "usage_type": "call"}, {"api_name": "models.WorkDetails.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.WorkDetails", "line_number": 79, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 97, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 98, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 108, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 109, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 117, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 118, "usage_type": "call"}, {"api_name": "models.JobCategory.objects.all", "line_number": 143, "usage_type": "call"}, {"api_name": "models.JobCategory.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "models.JobCategory", "line_number": 143, "usage_type": "name"}, {"api_name": "models.JobCategory", "line_number": 145, "usage_type": "call"}, {"api_name": "models.WorkDetails", "line_number": 148, "usage_type": "call"}, {"api_name": "models.WorkDetails.objects.all", "line_number": 153, "usage_type": "call"}, {"api_name": "models.WorkDetails.objects", "line_number": 153, "usage_type": "attribute"}, {"api_name": "models.WorkDetails", "line_number": 153, "usage_type": "name"}, {"api_name": "models.WorkDetails.objects.filter", "line_number": 183, "usage_type": "call"}, {"api_name": "models.WorkDetails.objects", "line_number": 183, "usage_type": "attribute"}, {"api_name": "models.WorkDetails", "line_number": 183, "usage_type": "name"}, {"api_name": "threading.Event", "line_number": 202, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 204, "usage_type": "attribute"}, {"api_name": 
"time.sleep", "line_number": 210, "usage_type": "call"}, {"api_name": "apscheduler.schedulers.background.BackgroundScheduler.run_continuously", "line_number": 218, "usage_type": "attribute"}, {"api_name": "apscheduler.schedulers.background.BackgroundScheduler", "line_number": 218, "usage_type": "name"}, {"api_name": "apscheduler.schedulers.background.BackgroundScheduler", "line_number": 223, "usage_type": "call"}]} +{"seq_id": "559552210", "text": "from random import random\nimport math\nimport yaml\n\nfrom preprocess_datafile import preprocess\nfrom utils import *\n\n\ndef discretize_result(dataset):\n \"\"\"\n Convert result to integer value and return key\n\n :param dataset: the dataset to convert the results\n :return: the key to convert from integer to class value\n \"\"\"\n result_col = len(dataset[0]) - 1\n\n # get the unique class values\n class_values = [row[result_col] for row in dataset]\n unique = set(class_values)\n\n # convert result values to discretized values\n lookup = {}\n for i, value in enumerate(unique):\n lookup[value] = i\n for row in dataset:\n row[result_col] = lookup[row[result_col]]\n\n # get the key to convert discretized values back to class values\n class_key = dict()\n for key, val in lookup.items():\n class_key[val] = key\n write_output(outputfile, f'class key: {class_key}')\n return class_key\n\n\ndef apply_class_key(dataset, class_key):\n \"\"\"\n Convert discretized result column back to class values\n\n :param dataset: the dataset to convert\n :param class_key: the key to convert from discretized values to class values\n :return: the converted dataset\n \"\"\"\n result_col = len(dataset[0]) - 1\n\n # create key to convert the discretized result column\n lookup = {}\n for key, val in class_key.items():\n lookup[val] = key\n\n # convert the discretized result column\n for row in dataset:\n row[result_col] = lookup[row[result_col]]\n return dataset\n\n\ndef normalize_regression_results(dataset):\n \"\"\"\n Normalize the result column for a regression dataset\n\n :param dataset: the dataset to normalize\n :return: the dataset with the normalized result column\n \"\"\"\n result_col = len(dataset[0]) - 1\n\n # get the range of the dataset\n min_val = float('inf')\n max_val = float('-inf')\n for row in dataset:\n if row[result_col] < min_val:\n min_val = row[result_col]\n elif row[result_col] > max_val:\n max_val = row[result_col]\n range = max_val - min_val\n\n # normalize the result column of the dataset within the calculated range\n for row in dataset:\n row[result_col] = (row[result_col] - min_val) / range\n return [min_val, max_val]\n\n\ndef apply_regression_range(dataset, regression_range):\n \"\"\"\n Extrapolate normalized result column to regression range (de-normalize)\n\n :param dataset: the dataset to de-normalize\n :param regression_range: the range of the result column\n :return: the de-normalized result column\n \"\"\"\n\n result_col = len(dataset[0]) - 1\n\n # get the min, max and range values from the regression range\n min_val = regression_range[0]\n max_val = regression_range[1]\n range = max_val - min_val\n\n # de-normalize the result column\n for row in dataset:\n row[result_col] = (row[result_col] - min_val) / range\n return dataset\n\n\ndef classification_accuracy(actual, predicted):\n \"\"\"\n Compute the number of incorrect classification instances as a percentage\n\n :param actual: the actual classification value list\n :param predicted: the predicted classification value list\n :return: the number of incorrect classifications as a 
percentage\n \"\"\"\n incorrect = 0\n\n # count the number of incorrect cases\n for i in range(len(actual)):\n if actual[i] != predicted[i]:\n incorrect += 1\n\n # return the number incorrect as a percentage of total cases\n return incorrect / float(len(actual)) * 100.0\n\n\ndef mse_accuracy(actual, predicted, regression_range):\n \"\"\"\n Compute the mean squared error between predicted and actual cases\n\n :param actual: the actual regression value list\n :param predicted: the predicted regression value list\n :param regression_range: the range of the regression result column\n :return: the mean squared error between the predicted and actual result sets\n \"\"\"\n mse = 0\n\n # compute mse for each of the predictions\n for i in range(len(actual)):\n # extrapolate predicted and actual values to de-normalized values\n delta = regression_range[1] - regression_range[0]\n actual_val = actual[i] * delta + regression_range[0]\n predicted_val = predicted[i][0] * delta + regression_range[0]\n\n # sum the mean squared error\n mse += (actual_val - predicted_val) ** 2\n return mse\n\n\ndef neuron_sigmoid_value(weights, inputs):\n \"\"\"\n Compute the sigmoid activation of a neuron\n\n :param weights: the weight values for the neuron\n :param inputs: the input values from the previous layer\n :return: the sigmoid activation of the neuron\n \"\"\"\n\n # initialize the activation to the bias value\n bias = weights[-1]\n activation = bias\n\n # add the activation values for each weight/input combination\n for i in range(len(weights) - 1):\n activation += weights[i] * inputs[i]\n\n if verbose:\n write_output(outputfile, f'node activation value: {activation}')\n write_output(outputfile, f'node sigmoid value: {1.0 / (1.0 + math.exp(-activation))}')\n\n # return the sigmoid activation of the neuron\n return 1.0 / (1.0 + math.exp(-activation))\n\n\ndef forward_propagate(network, inputs):\n \"\"\"\n Propagate inputs through the network and return the output layer values\n Intermediate layer values are logged when verbose mode is enabled\n\n :param network: the network to propagate through\n :param inputs: the row of input values\n :return: the values of the output layer\n \"\"\"\n\n if verbose:\n write_output(outputfile, 'Forward propagation example: ')\n\n # propagate through each layer\n for layer_num, layer in enumerate(network):\n layer_output = []\n\n if verbose:\n write_output(outputfile, f'layer {layer_num} inputs: {inputs}')\n\n # get the output for each neuron in the layer\n for neuron in layer:\n neuron['output'] = neuron_sigmoid_value(neuron['weights'], inputs)\n layer_output.append(neuron['output'])\n\n if verbose:\n write_output(outputfile, f'layer {layer_num} outputs: {layer_output}')\n\n inputs = layer_output\n\n # return the outputs for the final layer\n return inputs\n\n\ndef backward_propagate_error(network, actual):\n \"\"\"\n Backpropagate error through the network\n\n :param network: the network to propagate the error through\n :param actual: the actual value\n :return:\n \"\"\"\n\n if verbose:\n write_output(outputfile, 'Gradient calculation example: ')\n\n # iterate over each layer in the network in reverse order\n for layer_num in reversed(range(len(network))):\n layer = network[layer_num]\n errors = []\n\n # propagate error from a hidden layer\n if layer_num != len(network) - 1:\n for neuron_index in range(len(layer)):\n error = 0.0\n # propagate the error delta\n for neuron in network[layer_num + 1]:\n error += (neuron['weights'][neuron_index] * neuron['delta'])\n errors.append(error)\n\n # propagate error 
from the result layer\n else:\n for neuron_index in range(len(layer)):\n neuron = layer[neuron_index]\n errors.append(actual[neuron_index] - neuron['output'])\n\n # propagate the error based on the derivative of the sigmoid function\n for neuron_index in range(len(layer)):\n neuron = layer[neuron_index]\n neuron['delta'] = errors[neuron_index] * neuron['output'] * (1 - neuron['output'])\n\n if verbose:\n write_output(outputfile, f'the error for node {neuron_index} in layer {layer_num} is {errors[neuron_index]}, which corresponds to a gradient of {neuron[\"delta\"]}')\n\n\ndef update_weights(network, row, l_rate):\n \"\"\"\n Update the weights for the whole network\n\n :param network: the network to update the weights values of\n :param row: the result row\n :param l_rate: the specified learning rate\n \"\"\"\n\n if verbose:\n write_output(outputfile, 'Weight update example: ')\n\n # iterate over each layer in the network\n for layer_num in range(len(network)):\n\n # get the inputs for the layer\n inputs = row[:-1]\n if layer_num != 0:\n inputs = [neuron['output'] for neuron in network[layer_num - 1]]\n for neuron in network[layer_num]:\n for input_index in range(len(inputs)):\n\n if verbose:\n write_output(outputfile, f'update neuron {input_index} in layer {layer_num} weight from {neuron[\"weights\"][input_index]} by value of {l_rate * neuron[\"delta\"] * inputs[input_index]}')\n\n # update the neuron weights by the delta values\n neuron['weights'][input_index] += l_rate * neuron['delta'] * inputs[input_index]\n\n if verbose:\n write_output(outputfile, f'new weight value is {neuron[\"weights\"][input_index]}')\n # update the neuron bias by the delta value\n neuron['weights'][-1] += l_rate * neuron['delta']\n\n\ndef train_network(network, train, l_rate, n_epoch, n_outputs, class_key):\n \"\"\"\n Train a network based on parameters\n\n :param network: the network to train\n :param train: the training set\n :param l_rate: the learning rate\n :param n_epoch: the number of times to iterate over the training data\n :param n_outputs: the number of output nodes\n :param class_key: a key used to determine if we have regression or classification\n \"\"\"\n # handle classification training\n if class_key:\n for epoch in range(n_epoch+1):\n sum_error = 0\n # iterate over each training case\n for row in train:\n # compute the predicted output of the network\n outputs = forward_propagate(network, row)\n\n # get the expected output of the network\n expected = [0 for i in range(n_outputs)]\n expected[row[-1]] = 1\n\n # propagate the error and update the network\n backward_propagate_error(network, expected)\n update_weights(network, row, l_rate)\n\n # compute the error of the network\n sum_error += sum(abs(outputs[i] - expected[i]) for i in range(len(outputs)))\n\n if epoch % 100 == 0 or verbose:\n write_output(outputfile, f'epoch: {epoch}, lrate: {l_rate:.3f}, error: {sum_error:.8f}')\n\n # handle regression training\n else:\n for epoch in range(n_epoch+1):\n sum_error = 0\n # iterate over each training case\n for row in train:\n # compute the predicted output of the network\n output = forward_propagate(network, row)\n\n # get the expected output for the network\n expected = [row[-1] for i in range(n_outputs)]\n\n # propagate the error and update the network\n backward_propagate_error(network, expected)\n update_weights(network, row, l_rate)\n\n # compute the mse of the network\n sum_error = sum([(expected[i] - output[i]) ** 2 for i in range(len(expected))])\n\n if epoch % 100 == 0 or verbose:\n 
write_output(outputfile, f'epoch: {epoch}, lrate: {l_rate:.3f}, error: {sum_error:.8f}')\n\n\ndef initialize_network(n_inputs, n_layers, n_hidden, n_outputs):\n \"\"\"\n Initialize a network with random weights values\n\n :param n_inputs: the number of input nodes\n :param n_layers: the number of layers\n :param n_hidden: the number of hidden nodes per layer\n :param n_outputs: the number of output nodes\n :return: the initialized network\n \"\"\"\n\n # initialize an empty network\n network = []\n\n # create a network with 0 hidden layers\n if n_layers == 0:\n # initialize layer with random weights\n output_layer = [{'weights': [random() for _ in range(n_inputs + 1)]} for _ in range(n_outputs)]\n network.append(output_layer)\n\n # create a network with 1 hidden layer\n elif n_layers == 1:\n # initialize layers with random weights\n hidden_layer = [{'weights': [random() for _ in range(n_inputs + 1)]} for _ in range(n_hidden[0])]\n network.append(hidden_layer)\n output_layer = [{'weights': [random() for _ in range(n_hidden[0] + 1)]} for _ in range(n_outputs)]\n network.append(output_layer)\n\n # create a network with 2 hidden layers\n elif n_layers == 2:\n # initialize layers with random weights\n hidden_layer = [{'weights': [random() for _ in range(n_inputs + 1)]} for _ in range(n_hidden[0])]\n network.append(hidden_layer)\n hidden_layer2 = [{'weights': [random() for _ in range(n_hidden[0] + 1)]} for _ in range(n_hidden[1])]\n network.append(hidden_layer2)\n output_layer = [{'weights': [random() for _ in range(n_hidden[1] + 1)]} for _ in range(n_outputs)]\n network.append(output_layer)\n\n # handle if there are more than 2 hidden layers specified\n else:\n raise RuntimeError('Number of layers must be in [0, 1, 2]')\n\n # output_network(network)\n\n return network\n\n\ndef output_network(network):\n \"\"\"\n Helper function to output human readable network\n\n :param network: the network to output\n \"\"\"\n\n # get some network metadata (number of hidden layers, inputs, and outputs)\n n_layers = len(network) - 1\n n_inputs = len(network[0][0]['weights']) - 1\n n_outputs = len(network[-1])\n\n # get the number of hidden nodes in each hidden layer\n n_hidden = []\n for layer in network[:-1]:\n n_hidden.append(len(layer))\n\n # output the network\n write_output(outputfile, f'the trained network:')\n write_output(outputfile, f'there are a total of {n_layers} hidden layers')\n write_output(outputfile, f'there are {n_inputs} inputs')\n if n_layers > 0:\n write_output(outputfile, f'there are {n_hidden[0]} nodes in the 1st hidden layer')\n if n_layers > 1:\n write_output(outputfile, f'there are {n_hidden[1]} nodes in the 2nd hidden layer')\n write_output(outputfile, f'there are {n_outputs} nodes in the output layer')\n for layer_num, layer in enumerate(network):\n if layer_num < n_layers:\n write_output(outputfile, f'layer {layer_num}: {layer}')\n else:\n write_output(outputfile, f'output layer: {layer}')\n for node_num, node in enumerate(layer):\n write_output(outputfile, f'node #{node_num}: {node}')\n\n\ndef predict(network, inputs, class_key):\n \"\"\"\n Return the prediction of a network\n\n :param network: the network to make the prediction\n :param inputs: the input values to predict\n :param class_key: a key used to determine if we have regression or classification\n :return: the predicted output\n \"\"\"\n outputs = forward_propagate(network, inputs)\n if class_key:\n return outputs.index(max(outputs))\n else:\n return outputs\n\n\ndef back_propagation(train, test, l_rate, n_epoch, 
n_layers, n_hidden, class_key):\n \"\"\"\n Train a network and return predicted values using back propagation\n\n :param train: the training set\n :param test: the testing set\n :param l_rate: the learning rate\n :param n_epoch: the number of times to iterate over the training data\n :param n_layers: the number of hidden layers\n :param n_hidden: the number of hidden nodes per hidden layer\n :param class_key: the translation from discretized class values to class name\n :return: the predictions from the trained network\n \"\"\"\n\n # get the number of inputs in the training data\n n_inputs = len(train[0]) - 1\n\n # initialize the number of outputs to the number of classes, or 1 for regression\n if class_key:\n n_outputs = len(set([row[-1] for row in train]))\n else:\n n_outputs = 1\n\n # initialize the network to train\n network = initialize_network(n_inputs, n_layers, n_hidden, n_outputs)\n\n # train the network\n train_network(network, train, l_rate, n_epoch, n_outputs, class_key)\n\n output_network(network)\n\n # predict the result based on the trained network\n predictions = []\n for row in test:\n prediction = predict(network, row, class_key)\n predictions.append(prediction)\n return predictions\n\n\ndef tune(train_set, tune_set, n_layers, class_key, regression_range):\n \"\"\"\n Tune the hyperparameters for back propagation\n\n :param train_set: the training set\n :param tune_set: the tuning set\n :param n_layers: the number of hidden layers\n :param class_key: the translation from discretized class values to class name\n :param regression_range: the range of the regression results\n :return: the best l_rate, n_epoch and n_hidden values found by the grid search\n \"\"\"\n\n # specify the range of tuning parameters\n # l_rate_range = [0.01, 0.1, 0.3, 1, 10]\n # n_epoch_range = [500, 300, 200]\n l_rate_range = [10, 0.5, 0.1]\n n_epoch_range = [500, 300]\n n_hidden_val_range = [5, 9]\n\n # initialize best loss to infinity\n best_loss = float('inf')\n\n # create a grid search over the number of hidden nodes depending on the number of hidden layers\n n_hidden_vals = []\n if n_layers == 0: # 0 hidden layers\n n_hidden_vals.append([])\n elif n_layers == 1: # 1 hidden layer\n for n0 in n_hidden_val_range:\n n_hidden_vals.append([n0])\n elif n_layers == 2: # 2 hidden layers\n num_hidden = [None] * n_layers\n for n1 in n_hidden_val_range:\n num_hidden[0] = n1\n for n2 in n_hidden_val_range:\n num_hidden[1] = n2\n n_hidden_vals.append(num_hidden.copy())\n\n # create a grid search over all tunable parameters\n for l_rate in l_rate_range:\n for n_epoch in n_epoch_range:\n for n_hidden in n_hidden_vals:\n write_output(outputfile, f'l_rate: {l_rate}; n_epoch: {n_epoch}, n_hidden: {n_hidden}')\n\n # evaluate the loss of the tuning parameters\n loss = evaulate_back_prop(train_set, tune_set, l_rate, n_epoch, n_layers, n_hidden, class_key, regression_range)\n\n # find the best tuning parameters based off lowest loss\n if loss < best_loss:\n best_loss = loss\n best_l_rate = l_rate\n best_n_epoch = n_epoch\n best_n_hidden = n_hidden\n\n # return the best values for the tuning parameters\n write_output(outputfile, f'The tuned model looks like: l_rate: {best_l_rate}; n_epoch: {best_n_epoch}, n_hidden: {best_n_hidden}')\n write_output(outputfile, f'The model has a loss of {best_loss}')\n return best_l_rate, best_n_epoch, best_n_hidden\n\n\ndef evaulate_back_prop(train_set, test_set, l_rate, n_epoch, n_layers, n_hidden, class_key, regression_range):\n \"\"\"\n Compute the error for the trained network\n\n :param train_set: the training set\n :param test_set: the test set\n 
:param l_rate: the learning rate\n :param n_epoch: the number of times to iterate over the training data\n :param n_layers: the number of hidden layers\n :param n_hidden: the number of hidden nodes for each hidden layer\n :param class_key: the translation from discretized class values to class name\n :param regression_range: the range of the regression results\n :return:\n \"\"\"\n\n # get the predicted and actual results\n predicted = back_propagation(train_set, test_set, l_rate, n_epoch, n_layers, n_hidden, class_key)\n actual = [row[-1] for row in test_set]\n\n # write_output(outputfile, f'predicted: {predicted}')\n # write_output(outputfile, f'actual: {actual}')\n\n # iterate over each prediction\n for index, instance in enumerate(predicted):\n\n # output predicted and actual class\n if class_key:\n write_output(outputfile, \n f'predicted: {class_key[predicted[index]]}; actual: {class_key[actual[index]]}')\n # output predicted and actual value\n else:\n range = regression_range[1] - regression_range[0]\n write_output(outputfile, \n f'predicted: {predicted[index][0] * range + regression_range[0]}; actual: {actual[index] * range + regression_range[0]}')\n\n # compute the classification loss\n if class_key:\n loss = classification_accuracy(actual, predicted)\n # compute the mean squared error for regression\n else:\n loss = mse_accuracy(actual, predicted, regression_range)\n return loss\n\n\ndef main():\n\n # get the preprocessed dataframe\n df = preprocess(datafile, optionsfile, outputfile)\n\n # convert dataframe to list of lists\n dataset = df.values.tolist()\n\n # determine whether classification or regression and store keys\n if dict(df.dtypes)['result'] not in ['int64', 'float64'] or df[\"result\"].nunique() < 10:\n write_output(outputfile, \"This is a classification set\")\n class_key = discretize_result(dataset)\n regression_range = False\n else:\n write_output(outputfile, \"This is a regression set\")\n class_key = False\n regression_range = normalize_regression_results(dataset)\n\n write_output(outputfile, 'the dataset is: ')\n for row in dataset:\n write_output(outputfile, row)\n\n # split out the tuning dataset from the main dataset\n df, tune_set = split_tune_df(df)\n write_output(outputfile, f'the tuning data set:\\n {tune_set}')\n\n # process the tuning set\n tune_set = tune_set.values.tolist()\n if class_key:\n tune_set = apply_class_key(tune_set, class_key)\n else:\n tune_set = apply_regression_range(tune_set, regression_range)\n\n scores = []\n\n # split the remaining data into 5 groups for 5-fold cross validation\n group1, group2, group3, group4, group5 = split_5_fold_cross_validation(df)\n write_output(outputfile, f'group 1:\\n{group1}')\n write_output(outputfile, f'group 2:\\n{group2}')\n write_output(outputfile, f'group 3:\\n{group3}')\n write_output(outputfile, f'group 4:\\n{group4}')\n write_output(outputfile, f'group 5:\\n{group5}')\n groups = [group1, group2, group3, group4, group5]\n\n # cross validate each of the 5 sets\n for index, group in enumerate(groups):\n groups_copy = groups.copy()\n test_set = groups_copy.pop(index)\n train_set = pd.concat(groups_copy, axis=0)\n train_set = train_set.reset_index()\n del train_set['index']\n\n # process the training and test set\n test_set = test_set.values.tolist()\n train_set = train_set.values.tolist()\n if class_key:\n train_set = apply_class_key(train_set, class_key)\n test_set = apply_class_key(test_set, class_key)\n else:\n train_set = apply_regression_range(train_set, regression_range)\n test_set = 
apply_regression_range(test_set, regression_range)\n\n # tune the hyperparameters\n l_rate, n_epoch, n_hidden = tune(train_set, tune_set, n_layers, class_key, regression_range)\n\n # evaluate loss and save to list for each cross validation fold\n loss = evaulate_back_prop(train_set, test_set, l_rate, n_epoch, n_layers, n_hidden, class_key, regression_range)\n scores.append(loss)\n\n # output the results of the 5-fold cross validation\n write_output(outputfile, f'The set was {data_set}')\n write_output(outputfile, 'Error by fold: %s' % scores)\n if class_key:\n write_output(outputfile, f'% Incorrect: {(sum(scores) / float(len(scores))):.3f}%')\n else:\n write_output(outputfile, f'Total MSE: {sum(scores)}')\n\nif __name__ == '__main__':\n\n # initialize some options based on the selected set\n with open('config.yaml') as file:\n config = yaml.load(file, Loader=yaml.FullLoader)\n data_set = config['set']\n verbose = config['verbose']\n n_layers = config['layers']\n datafile = f'DataSets/{data_set}.data'\n optionsfile = f'DataSets/{data_set}.options.yaml'\n outputfile = f'output/{data_set}_{n_layers}_hiddenlayers.output.txt'\n\n # clear the contents of the outputfile\n open(outputfile, 'w').close()\n\n main()\n", "sub_path": "IntroMachineLearning/Module09-11/NeuralNetwork/NeuralNet.py", "file_name": "NeuralNet.py", "file_ext": "py", "file_size_in_byte": 24118, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "math.exp", "line_number": 166, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 169, "usage_type": "call"}, {"api_name": "random.random", "line_number": 355, "usage_type": "call"}, {"api_name": "random.random", "line_number": 361, "usage_type": "call"}, {"api_name": "random.random", "line_number": 363, "usage_type": "call"}, {"api_name": "random.random", "line_number": 369, "usage_type": "call"}, {"api_name": "random.random", "line_number": 371, "usage_type": "call"}, {"api_name": "random.random", "line_number": 373, "usage_type": "call"}, {"api_name": "preprocess_datafile.preprocess", "line_number": 581, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 659, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 659, "usage_type": "attribute"}]} +{"seq_id": "498291848", "text": "from sqlalchemy import create_engine, types\nfrom .db_credentials import *\nimport pandas as pd\nimport logging\nimport cx_Oracle\nimport sys\n\n\nlogging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)\n\nservers = {\n 'edm': edm_db_config,\n 'fra': fra_db_config,\n 'houstby': houstby_db_config,\n 'houprod': houprod_db_config,\n 'nor': nor_db_config,\n 'sha': sha_db_config,\n 'dw': datawarehouse_db_config\n}\n\ndef quick_etl(sql, table_name, target_db, source_db):\n \"\"\"\nparams\n sql str, sql statement\n table_name str, name of the staging table\n target_db str, dw, ds or excel\n source_db str or list, \"all\" or a list of sites \"['houstby','edm',...]\"\n current sites are edm, fra, houstby, houprod, nor, sha and dw\n \"\"\"\n\n if target_db.lower() == 'dw':\n eng = create_engine('oracle+cx_oracle://', connect_args=datawarehouse_db_config, echo=False)\n elif target_db.lower() == 'ds':\n eng = create_engine('oracle+cx_oracle://', connect_args=dataservices_db_config, echo=False)\n elif target_db.lower() == 'excel':\n pass\n else:\n sys.exit('incorrect target')\n\n if source_db == 'all':\n selected_sites = list(servers.keys())\n selected_sites.remove('houprod')\n selected_sites.remove('dw')\n 
else:\n selected_sites = source_db\n\n data = []\n for site in selected_sites:\n\n print('{div} {site} {div}'.format(site=site.upper(), div='='*30))\n\n if site not in servers.keys():\n print('%s not valid!' % site.upper(), file=sys.stderr)\n continue\n try:\n conn = create_engine('oracle+cx_oracle://', connect_args=servers[site], echo=False)\n d = conn.execute(sql)\n meta = d.cursor.description\n resultset = d.fetchall()\n\n print('ROWCOUNT %d' % d.rowcount)\n\n df = pd.DataFrame.from_records(resultset, columns=[c[0] for c in meta])\n data.append(df)\n except Exception as e:\n print(e, file=sys.stderr)\n if site == 'houstby' and 'read-only' in str(e):\n selected_sites.append('houprod')\n continue\n\n df = pd.concat(data, sort=False)\n\n dtyp = {}\n for col in meta:\n name = col[0]\n type = col[1]\n size = col[2] if col[2] else 1\n\n if type == cx_Oracle.STRING:\n dtyp[name] = types.VARCHAR(size)\n elif type == cx_Oracle.FIXED_CHAR:\n dtyp[name] = types.VARCHAR(size)\n elif type == cx_Oracle.NUMBER:\n dtyp[name] = types.FLOAT(size)\n elif type == cx_Oracle.DATETIME:\n dtyp[name] = types.DATE\n elif type == cx_Oracle.TIMESTAMP:\n dtyp[name] = types.TIMESTAMP\n\n print('{div} {site} {div}'.format(site=target_db.upper(), div='='*30))\n print('META')\n print(meta)\n # print('\\n'.join(meta))\n\n if target_db == 'excel':\n with pd.ExcelWriter(table_name) as writer:\n df.to_excel(writer, index=False, sheet_name='Sheet1', freeze_panes=(1, 0))\n writer.book.create_sheet('Sheet2')\n writer.book.worksheets[1]['A2'] = sql\n else:\n df.to_sql(table_name.lower(), eng, if_exists='replace', index=False, dtype=dtyp,\n chunksize=20000)\n", "sub_path": "etl/quick_etl.py", "file_name": "quick_etl.py", "file_ext": "py", "file_size_in_byte": 3286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_records", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 71, "usage_type": "call"}, {"api_name": "cx_Oracle.STRING", "line_number": 79, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types.VARCHAR", "line_number": 80, "usage_type": "call"}, {"api_name": "sqlalchemy.types", "line_number": 80, "usage_type": "name"}, {"api_name": "cx_Oracle.FIXED_CHAR", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types.VARCHAR", "line_number": 82, "usage_type": "call"}, {"api_name": "sqlalchemy.types", "line_number": 82, "usage_type": "name"}, {"api_name": "cx_Oracle.NUMBER", "line_number": 83, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types.FLOAT", "line_number": 84, "usage_type": "call"}, {"api_name": "sqlalchemy.types", "line_number": 84, "usage_type": "name"}, {"api_name": "cx_Oracle.DATETIME", "line_number": 85, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types.DATE", "line_number": 86, "usage_type": "attribute"}, {"api_name": 
"sqlalchemy.types", "line_number": 86, "usage_type": "name"}, {"api_name": "cx_Oracle.TIMESTAMP", "line_number": 87, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types.TIMESTAMP", "line_number": 88, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 88, "usage_type": "name"}, {"api_name": "pandas.ExcelWriter", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "241359116", "text": "from django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\nfrom .models import Post\n\n\nclass PostListCreateTest(TestCase):\n def setUp(self):\n self.client = APIClient()\n self.url = reverse('post-list')\n self.user = get_user_model().objects.create_user(\n email='testuser@test.com',\n username='testuser',\n password='password'\n )\n self.client.force_authenticate(user=self.user)\n\n def test_post_create_correct(self):\n data = {\n \"text\": \"Test post #1 by user {}\".format(self.user.username)\n }\n response = self.client.post(self.url, data)\n self.assertEqual(status.HTTP_201_CREATED, response.status_code)\n self.assertEqual(self.user.email, response.data['author'])\n\n def test_post_create_no_text(self):\n response = self.client.post(self.url, {})\n text_err = response.data['text']\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n self.assertIn(\"This field is required.\", text_err)\n\n def test_post_list(self):\n Post.objects.create(author=self.user.profile, text=\"Test text 1\")\n Post.objects.create(author=self.user.profile, text=\"Test text 2\")\n response = self.client.get(self.url)\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(Post.objects.filter(author=self.user.profile).count(), response.data['count'])\n\n\nclass PostDetailTest(TestCase):\n def setUp(self):\n self.client = APIClient()\n self.user = get_user_model().objects.create_user(\n email='testuser@test.com',\n username='testuser',\n password='password'\n )\n self.client.force_authenticate(user=self.user)\n self.post = Post.objects.create(author=self.user.profile, text=\"Test text #1\")\n self.url = reverse('post-detail', kwargs={\"pk\": self.post.pk})\n\n def test_get_post(self):\n response = self.client.get(self.url)\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(self.post.text, response.data['text'])\n\n def test_post_delete_by_author(self):\n post = Post.objects.create(author=self.user.profile, text=\"Test text #2\")\n url = reverse('post-detail', kwargs={\"pk\": post.pk})\n response = self.client.delete(url)\n self.assertEqual(status.HTTP_204_NO_CONTENT, response.status_code)\n self.assertFalse(Post.objects.filter(pk=post.pk).exists())\n\n def test_post_delete_alien(self):\n other_user = get_user_model().objects.create_user(\n email='testuser2@test.com',\n username='testuser2',\n password='password'\n )\n post = Post.objects.create(author=other_user.profile, text=\"Test text #3\")\n url = reverse('post-detail', kwargs={\"pk\": post.pk})\n response = self.client.delete(url)\n self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)\n\n def test_post_delete_not_found(self):\n url = reverse('post-detail', kwargs={\"pk\": 666})\n response = self.client.delete(url)\n self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)\n\n\nclass LikesTest(TestCase):\n def setUp(self):\n self.client = APIClient()\n self.user = get_user_model().objects.create_user(\n 
email='testuser@test.com',\n username='testuser',\n password='password'\n )\n self.client.force_authenticate(user=self.user)\n self.post = Post.objects.create(author=self.user.profile, text=\"Test text #1\")\n self.like_url = reverse('like-post', kwargs={\"pk\": self.post.pk})\n self.unlike_url = reverse('unlike-post', kwargs={\"pk\": self.post.pk})\n\n def test_post_like(self):\n response = self.client.post(self.like_url)\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(self.post.pk, response.data['post'])\n self.assertEqual(self.user.email, response.data['profile'])\n self.assertTrue(response.data['liked'])\n\n def test_post_like_not_found(self):\n url = reverse('like-post', kwargs={\"pk\": 888})\n response = self.client.post(url)\n self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)\n\n def test_post_unlike(self):\n self.post.likes.create(profile=self.user.profile)\n response = self.client.post(self.unlike_url)\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(self.post.pk, response.data['post'])\n self.assertEqual(self.user.email, response.data['profile'])\n self.assertFalse(response.data['liked'])\n\n def test_post_unlike_not_found(self):\n url = reverse('unlike-post', kwargs={\"pk\": 888})\n response = self.client.post(url)\n self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)\n", "sub_path": "apps/posts/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 5003, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.test.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.test.APIClient", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 12, "usage_type": "call"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 13, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 25, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 31, "usage_type": "name"}, {"api_name": "models.Post.objects.create", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Post.objects.create", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 36, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 38, "usage_type": "name"}, {"api_name": "models.Post.objects.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 39, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 42, "usage_type": "name"}, {"api_name": "rest_framework.test.APIClient", "line_number": 44, "usage_type": "call"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Post.objects.create", "line_number": 51, "usage_type": "call"}, 
{"api_name": "models.Post.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 51, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 52, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 56, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 56, "usage_type": "name"}, {"api_name": "models.Post.objects.create", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 60, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 61, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 63, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 63, "usage_type": "name"}, {"api_name": "models.Post.objects.filter", "line_number": 64, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 64, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 67, "usage_type": "call"}, {"api_name": "models.Post.objects.create", "line_number": 72, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 72, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 75, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 75, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 78, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 80, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 80, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 83, "usage_type": "name"}, {"api_name": "rest_framework.test.APIClient", "line_number": 85, "usage_type": "call"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 86, "usage_type": "call"}, {"api_name": "models.Post.objects.create", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 92, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 93, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 94, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 98, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 98, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 104, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 106, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 106, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 111, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 111, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 117, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 119, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 119, "usage_type": "name"}]} +{"seq_id": 
"315036651", "text": "import pulp\nimport random \n\nnum_item = 10\nb = 20\nc = [random.randint(1, 10) for _ in range(num_item)]\na = [random.randint(1, 10) for _ in range(num_item)]\n\nprob = pulp.LpProblem('Knapsack', pulp.LpMaximize)\nx = [pulp.LpVariable(name='x{}'.format(i), cat=pulp.LpInteger, lowBound=0, upBound=1) for i in range(num_item)] # 変数の定義\nobj = pulp.lpSum(c[i]*x[i] for i in range(num_item))\nprob.setObjective(obj)\nprob += pulp.lpSum([a[i]*x[i] for i in range(num_item)]) <= b\n\n# 問題の書き込み\n# prob.writeLP(\"Knapsack.lp\")\nprob.solve()\nprint(\"Status:\", pulp.LpStatus[prob.status])\nprint(\"ObjValue = {}\".format(pulp.value(prob.objective)))\nfor v in prob.variables():\n print(v.name, \"=\", v.varValue)\n\n\n\"\"\"\n$ python test_pulp.py\nStatus: Optimal\nObjValue = 35.0\nx0 = 1.0\nx1 = 0.0\nx2 = 1.0\nx3 = 1.0\nx4 = 0.0\nx5 = 0.0\nx6 = 1.0\nx7 = 0.0\nx8 = 0.0\nx9 = 1.0\n\"\"\"", "sub_path": "optimization/test_pulp.py", "file_name": "test_pulp.py", "file_ext": "py", "file_size_in_byte": 861, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "random.randint", "line_number": 6, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 7, "usage_type": "call"}, {"api_name": "pulp.LpProblem", "line_number": 9, "usage_type": "call"}, {"api_name": "pulp.LpMaximize", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable", "line_number": 10, "usage_type": "call"}, {"api_name": "pulp.LpInteger", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pulp.lpSum", "line_number": 11, "usage_type": "call"}, {"api_name": "pulp.lpSum", "line_number": 13, "usage_type": "call"}, {"api_name": "pulp.LpStatus", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pulp.value", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "36814114", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport click\n\n\n@click.group()\ndef main():\n pass\n\n\n@main.command()\n@click.option(\n '--port', type=int,\n default=9000, show_default=True,\n help='The port number used by the server.',\n)\n@click.option(\n '--text', type=str, required=True,\n help='The input string to be segmented.',\n)\ndef segment(port, text):\n '''Send request for word segmentation.'''\n\n from salada import rpc\n\n client = rpc.Client(port)\n print(client.segment(text))\n\n\n@main.command()\ndef complete():\n '''Send request for word completion.'''\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "examples/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "click.group", "line_number": 7, "usage_type": "call"}, {"api_name": "salada.rpc.Client", "line_number": 27, "usage_type": "call"}, {"api_name": "salada.rpc", "line_number": 27, "usage_type": "name"}, {"api_name": "click.option", "line_number": 13, "usage_type": "call"}, {"api_name": "click.option", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "439892760", "text": "from django.shortcuts import render\nimport requests\n# Create your views here.\n\ndef index(request):\n\turl = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid=YOUR_API_KEY'\n\tcity = 'Annandale'\n\tr = requests.get(url.format(city))\n\tprint(r.text)\n\n\treturn render(request, 'weather/home.html')", "sub_path": "Django2/the_weather/weather/views.py", "file_name": "views.py", "file_ext": "py", 
"file_size_in_byte": 310, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "230423733", "text": "#!/usr/bin/env python\n#\n# Prepare: ./analysis/concat-text.py data/2016_donald-trump/speeches/* > /tmp/all-trump.txt\n# Run: ./analysis/trump-lexical-dispersion-plot.py < /tmp/all-trump.txt\n#\nimport sys\nimport nltk\n\nreload(sys) # Reload does the trick!\nsys.setdefaultencoding('UTF8')\n\nraw = sys.stdin.read()\ntokens = nltk.word_tokenize(raw)\ntext = nltk.Text(tokens)\nnltk.draw.dispersion_plot(text, [\n \"isis\",\n \"mexico\",\n \"wall\",\n \"immigration\",\n \"obamacare\",\n \"crooked\",\n \"hillary\",\n \"bernie\",\n \"obama\",\n \"rigged\",\n \"law\",\n \"order\",\n \"economy\",\n \"jobs\",\n \"trade\",\n \"russia\",\n \"putin\",\n], \nignore_case=True,\ntitle='Lexical Dispersion Plot of Select Phrases in Time-Ordered Trump Tweets (after nomination)')\n", "sub_path": "analysis/trump-lexical-dispersion-plot.py", "file_name": "trump-lexical-dispersion-plot.py", "file_ext": "py", "file_size_in_byte": 756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.setdefaultencoding", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.stdin.read", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 12, "usage_type": "attribute"}, {"api_name": "nltk.word_tokenize", "line_number": 13, "usage_type": "call"}, {"api_name": "nltk.Text", "line_number": 14, "usage_type": "call"}, {"api_name": "nltk.draw.dispersion_plot", "line_number": 15, "usage_type": "call"}, {"api_name": "nltk.draw", "line_number": 15, "usage_type": "attribute"}]} +{"seq_id": "210525484", "text": "import numpy as np\nimport matplotlib.pyplot\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.patches as patches\nimport torch\nimport os\nimport cv2\n\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\n\nfont_label = {'family' : 'Arial',\n 'color' : 'black',\n 'weight' : 'normal',\n 'size' : 12,\n }\n\nfeature_path = '../../demo/res/car11005'\nfeature_path_2 = '../../demo/res/car11005_withoutAtt'\n\nframe_list = ['60','80','100']\n\ngs = gridspec.GridSpec(5, len(frame_list))\n\nfor idx, frame in enumerate(frame_list):\n im = cv2.imread(os.path.join(feature_path, frame + '.jpg'))\n _, state,_ = torch.load(os.path.join(feature_path, frame + '.pkl'))\n _, state2 = torch.load(os.path.join(feature_path_2, frame + '.pkl'))\n cell, hidden = state[0]\n cell2, hidden2 = state2[0]\n cell3,_ = state[1]\n cell4,_ = state2[1]\n # hidden = cell\n cell_map = torch.zeros(cell.size()[2:])\n # hidden_map = torch.zeros(hidden.size()[2:])\n cell2_map = torch.zeros(cell2.size()[2:])\n # hidden2_map = torch.zeros(hidden2.size()[2:])\n cell3_map = torch.zeros(cell3.size()[2:])\n cell4_map = torch.zeros(cell4.size()[2:])\n\n\n for i in range(cell_map.size(0)):\n for j in range(cell_map.size(1)):\n # a = cell.cpu().data[0,:,i,j]\n cell_map[i,j] = torch.norm(cell.cpu().data[0,:,i,j], 2)\n # hidden_map[i, j] = torch.norm(hidden.cpu().data[0, :, i, j], 2)\n cell2_map[i, j] = torch.norm(cell2.cpu().data[0, :, i, j], 2)\n # hidden2_map[i, j] = torch.norm(hidden2.cpu().data[0, :, i, j], 2)\n for i in range(cell3_map.size(0)):\n for j in 
range(cell3_map.size(1)):\n # a = cell.cpu().data[0,:,i,j]\n cell3_map[i, j] = torch.norm(cell3.cpu().data[0, :, i, j], 2)\n cell4_map[i, j] = torch.norm(cell4.cpu().data[0, :, i, j], 2)\n\n # cell_max_norm = torch.max(cell_map)\n # cell_map /= cell_max_norm\n #\n # hidden_max_norm = torch.max(hidden_map)\n # hidden_map /= hidden_max_norm\n cell_map = cv2.resize(cell_map.numpy(), (im.shape[1], im.shape[0]))\n cell2_map = cv2.resize(cell2_map.numpy(), (im.shape[1], im.shape[0]))\n # hidden_map = cv2.resize(hidden_map.numpy(), (im.shape[1], im.shape[0]))\n # hidden2_map = cv2.resize(hidden2_map.numpy(), (im.shape[1], im.shape[0]))\n cell3_map = cv2.resize(cell3_map.numpy(), (im.shape[1], im.shape[0]))\n cell4_map = cv2.resize(cell4_map.numpy(), (im.shape[1], im.shape[0]))\n\n ax = plt.subplot(gs[0, idx])\n plt.imshow(im)\n ax.spines['right'].set_color('none')\n ax.spines['left'].set_color('none')\n ax.spines['bottom'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.set_xticks([])\n ax.set_yticks([])\n plt.title('frame: '+frame, fontdict=font_label)\n\n ax = plt.subplot(gs[1, idx])\n plt.imshow(cell2_map, cmap=plt.get_cmap('jet'))\n ax.spines['right'].set_color('none')\n ax.spines['left'].set_color('none')\n ax.spines['bottom'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.set_xticks([])\n ax.set_yticks([])\n if idx == 0:\n plt.ylabel('(a)', fontdict=font_label)\n\n ax = plt.subplot(gs[2, idx])\n plt.imshow(cell_map, cmap=plt.get_cmap('jet'))\n ax.spines['right'].set_color('none')\n ax.spines['left'].set_color('none')\n ax.spines['bottom'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.set_xticks([])\n ax.set_yticks([])\n if idx == 0:\n plt.ylabel('(b)', fontdict=font_label)\n\n ax = plt.subplot(gs[3, idx])\n plt.imshow(cell4_map, cmap=plt.get_cmap('jet'))\n ax.spines['right'].set_color('none')\n ax.spines['left'].set_color('none')\n ax.spines['bottom'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.set_xticks([])\n ax.set_yticks([])\n if idx == 0:\n plt.ylabel('(c)', fontdict=font_label)\n\n ax = plt.subplot(gs[4, idx])\n plt.imshow(cell3_map, cmap=plt.get_cmap('jet'))\n ax.spines['right'].set_color('none')\n ax.spines['left'].set_color('none')\n ax.spines['bottom'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.set_xticks([])\n ax.set_yticks([])\n if idx == 0:\n plt.ylabel('(d)', fontdict=font_label)\n\nplt.show()\n\n\n", "sub_path": "utils/exp/fea_vis.py", "file_name": "fea_vis.py", "file_ext": "py", "file_size_in_byte": 4287, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 10, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 11, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 24, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": 
"attribute"}, {"api_name": "torch.load", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_cmap", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_cmap", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_cmap", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.get_cmap", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}]} +{"seq_id": "476502630", "text": "#!/usr/bin/env python\n\nimport rospy\nimport serial\nimport os\nfrom gpspub.msg import adv_gpsmsg\nfrom sensor_msgs.msg import NavSatFix\n\nos.system('echo %s | sudo -S %s' % ('ADV', 'chmod 777 /dev/ttyTHS0'))\npub = rospy.Publisher('ADVGPSData', adv_gpsmsg, queue_size = 5)\npub_nav = rospy.Publisher('GPSData', NavSatFix, queue_size = 5)\nrospy.init_node('ADVGPSPublisher', anonymous = False)\n\nser=serial.Serial(\"/dev/ttyTHS0\",115200,timeout=2)\nlatitude = 0.0\nlongitude = 0.0\nflag = 0\nspeed = 0.0\naccuracy = 0.0\ncounter = 0\ngpsdata = adv_gpsmsg()\ngpsdata_nav = NavSatFix()\ngpsdata_nav.header.frame_id = \"gps\"\n\nwhile not rospy.is_shutdown():\n\n\n\n\n line = ser.readline()\n\n try:\n line = str(line)\n\n #Latitude\n if line.startswith('la'):\n latitude = float(line[2:-2])\n gpsdata.Latitude = latitude\n gpsdata_nav.latitude = latitude\n rospy.loginfo('la:' + str(latitude))\n counter = counter + 1\n #Longitude\n elif line.startswith('lo'):\n longitude = float(line[2:-2])\n gpsdata.Longitude = longitude\n gpsdata_nav.longitude = longitude\n rospy.loginfo('lo:' + str(longitude))\n counter = counter + 1\n #Speed\n elif line.startswith('s'):\n speed = float(line[1:-2])\n gpsdata.Speed = speed\n rospy.loginfo('s:' + str(speed))\n counter = counter + 1\n #Accuracy\n elif line.startswith('a'):\n accuracy = float(line[1:-2])\n gpsdata.Accuracy = accuracy\n rospy.loginfo('a:' + str(accuracy))\n counter = counter + 1\n #Flag\n elif line.startswith('f'):\n flag = int(line[1:-2])\n gpsdata.Flag = flag\n rospy.loginfo('f:' + str(flag))\n counter = counter + 1\n if (counter == 5):\n pub.publish(gpsdata)\n pub_nav.publish(gpsdata_nav)\n counter = 0\n \n \n except Exception as err:\n rospy.loginfo(\"Error Info:\" + repr(err))\n continue\n\n \n\n", "sub_path": "src/gpspub/scripts/gpspublisher.py", "file_name": "gpspublisher.py", "file_ext": "py", "file_size_in_byte": 2084, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.system", "line_number": 9, "usage_type": "call"}, {"api_name": "rospy.Publisher", "line_number": 10, "usage_type": "call"}, {"api_name": "gpspub.msg.adv_gpsmsg", "line_number": 10, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 11, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.NavSatFix", "line_number": 11, "usage_type": "argument"}, {"api_name": "rospy.init_node", "line_number": 12, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 14, "usage_type": "call"}, {"api_name": "gpspub.msg.adv_gpsmsg", "line_number": 21, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.NavSatFix", "line_number": 22, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 25, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 40, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 47, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 53, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 59, "usage_type": "call"}, {"api_name": "rospy.loginfo", 
"line_number": 65, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "284854987", "text": "from django.test import TestCase, SimpleTestCase\nfrom django.db import IntegrityError\nfrom django.db.models import get_app, get_models\nfrom django.contrib.auth.models import Group, Permission\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom edc.lab.lab_profile.classes import site_lab_profiles\nfrom edc.lab.lab_profile.exceptions import AlreadyRegistered as AlreadyRegisteredLabProfile\nfrom edc.core.bhp_content_type_map.classes import ContentTypeMapHelper\nfrom edc.subject.entry.models import Entry\nfrom edc.core.bhp_content_type_map.models import ContentTypeMap\nfrom edc.testing.classes import TestAppConfiguration, TestVisitSchedule, TestLabProfile\nfrom edc.subject.lab_tracker.classes import site_lab_tracker\n\nfrom ..models import VisitDefinition\nfrom ..classes import Permissions\nfrom .factories import VisitDefinitionFactory\n\n\nclass TestPermissions(SimpleTestCase):\n\n def startup(self):\n try:\n site_lab_profiles.register(TestLabProfile())\n except AlreadyRegisteredLabProfile:\n pass\n\n TestAppConfiguration()\n site_lab_tracker.autodiscover()\n TestVisitSchedule().build()\n\n# content_type_map_helper = ContentTypeMapHelper()\n# content_type_map_helper.populate()\n# content_type_map_helper.sync()\n# visit_tracking_content_type_map = ContentTypeMap.objects.get(content_type__model='testvisit')\n# try:\n# visit_definition = VisitDefinitionFactory(code='1000', visit_tracking_content_type_map=visit_tracking_content_type_map)\n# for index, content_type in enumerate(ContentType.objects.filter(app_label='testing')):\n# model = content_type.model_class()\n# if 'entry_meta_data_manager' in dir(model):\n# content_type_map = ContentTypeMap.objects.get(content_type=content_type)\n# Entry.objects.create(visit_definition=visit_definition, content_type_map=content_type_map, entry_order=index)\n# except IntegrityError:\n# pass\n# \n# try:\n# visit_definition = VisitDefinitionFactory(code='2000', visit_tracking_content_type_map=visit_tracking_content_type_map)\n# for index, content_type in enumerate(ContentType.objects.filter(app_label='testing')[2:]):\n# model = content_type.model_class()\n# if 'entry_meta_data_manager' in dir(model):\n# content_type_map = ContentTypeMap.objects.get(content_type=content_type)\n# Entry.objects.create(visit_definition=visit_definition, content_type_map=content_type_map, entry_order=index)\n# except IntegrityError:\n# pass\n\n# self.group = Group.objects.create(name='field_staff')\n\n def test_adds_permissions1(self):\n self.startup()\n permissions = Permissions('field_staff', ['add'], visit_definition_codes=['1000'])\n permissions.replace()\n group = Group.objects.get(name='field_staff')\n self.assertGreater(group.permissions.all().count(), 0)\n\n def test_adds_permissions2(self):\n \"\"\"Adds permissions to the group for just add.\"\"\"\n self.startup()\n permissions = Permissions('field_staff', ['add'], visit_definition_codes=['1000'])\n permissions.clear_permissions_for_group()\n group = Group.objects.get(name='field_staff')\n permissions.replace()\n self.assertGreater(group.permissions.filter(codename__icontains='add_').count(), 0)\n self.assertEqual(group.permissions.filter(codename__icontains='change_').count(), 0)\n self.assertEqual(group.permissions.filter(codename__icontains='delete_').count(), 0)\n\n def test_adds_permissions3(self):\n \"\"\"Adds permissions to the group for both add and 
change.\"\"\"\n self.startup()\n codes = ['1000']\n visit_definitions = VisitDefinition.objects.filter(code__in=codes)\n entry_count = Entry.objects.filter(visit_definition__in=visit_definitions).count()\n permissions = Permissions('field_staff', ['add', 'change'], visit_definition_codes=codes)\n permissions.replace()\n group = Group.objects.get(name='field_staff')\n self.assertEqual(group.permissions.filter(codename__icontains='add_').count(), entry_count)\n self.assertEqual(group.permissions.filter(codename__icontains='change_').count(), entry_count)\n self.assertEqual(group.permissions.filter(codename__icontains='delete_').count(), 0)\n\n def test_adds_permissions4(self):\n \"\"\"Adds permissions for visit 1000 to the group for add and change, delete.\"\"\"\n self.startup()\n codes = ['1000']\n visit_definitions = VisitDefinition.objects.filter(code__in=codes)\n entry_count = Entry.objects.filter(visit_definition__in=visit_definitions).count()\n permissions = Permissions('field_staff', ['add', 'change', 'delete'], visit_definition_codes=codes)\n permissions.replace()\n group = Group.objects.get(name='field_staff')\n self.assertEqual(group.permissions.filter(codename__icontains='add_').count(), entry_count)\n self.assertEqual(group.permissions.filter(codename__icontains='change_').count(), entry_count)\n self.assertEqual(group.permissions.filter(codename__icontains='delete_').count(), entry_count)\n\n def test_adds_permissions5(self):\n \"\"\"Adds permissions for another visit, 2000, for add, change and delete.\"\"\"\n self.startup()\n codes = ['2000']\n visit_definitions = VisitDefinition.objects.filter(code__in=codes)\n entry_count = Entry.objects.filter(visit_definition__in=visit_definitions).count()\n permissions = Permissions('field_staff', ['add', 'change', 'delete'], visit_definition_codes=codes)\n permissions.replace()\n group = Group.objects.get(name='field_staff')\n self.assertEqual(group.permissions.filter(codename__icontains='add_').count(), entry_count)\n self.assertEqual(group.permissions.filter(codename__icontains='change_').count(), entry_count)\n self.assertEqual(group.permissions.filter(codename__icontains='delete_').count(), entry_count)\n\n def test_adds_permissions6(self):\n \"\"\"Adds permissions for both visits, 1000 and 2000, to the group for both add and change and delete and does not duplicate.\"\"\"\n self.startup()\n codes = ['1000', '2000']\n visit_definitions = VisitDefinition.objects.filter(code__in=codes)\n entries = Entry.objects.filter(visit_definition__in=visit_definitions)\n entry_count = len(list(set([entry.content_type_map.content_type_id for entry in entries])))\n permissions = Permissions('field_staff', ['add', 'change', 'delete'], visit_definition_codes=codes)\n permissions.replace()\n group = Group.objects.get(name='field_staff')\n self.assertEqual(group.permissions.filter(codename__icontains='add_').count(), entry_count)\n self.assertEqual(group.permissions.filter(codename__icontains='change_').count(), entry_count)\n self.assertEqual(group.permissions.filter(codename__icontains='delete_').count(), entry_count)\n\n def test_adds_permissions7(self):\n \"\"\"Creates a group if it does not exist.\"\"\"\n self.startup()\n codes = ['1000']\n permissions = Permissions('field_staff_team', ['add'], visit_definition_codes=codes)\n permissions.replace()\n group = Group.objects.get(name='field_staff_team')\n self.assertGreater(group.permissions.all().count(), 0)\n\n def test_adds_permissions8(self):\n \"\"\"Adds permissions for all models in the 
app\"\"\"\n self.startup()\n permissions = Permissions('field_staff_team', ['add'], app_label='testing')\n permissions.replace()\n group = Group.objects.get(name='field_staff_team')\n app = get_app('testing')\n models = get_models(app)\n self.assertEquals(group.permissions.all().count(), len(models))\n\n def test_adds_permissions9(self):\n \"\"\"Adds permissions for specified list of models in the app.\"\"\"\n self.startup()\n group = Group.objects.get(name='field_staff_team')\n permissions = Permissions('field_staff', ['add', 'change'], app_label='testing', models=['testvisit', 'TestScheduledModel'])\n permissions.replace()\n self.assertEqual(group.permissions.filter(codename='add_testvisit').count(), 1)\n self.assertEqual(group.permissions.filter(codename='add_testscheduledmodel').count(), 1)\n self.assertEqual(group.permissions.filter(codename='change_testvisit').count(), 1)\n self.assertEqual(group.permissions.filter(codename='change_testscheduledmodel').count(), 1)\n\n def test_adds_permissions10(self):\n \"\"\"Raises error if model not in app\"\"\"\n self.startup()\n self.assertRaises(AttributeError, Permissions, 'field_staff', ['add', 'change'], app_label='testing', models=['testvisit', 'BadDogModel'])\n", "sub_path": "edc/subject/visit_schedule/tests/test_permissions.py", "file_name": "test_permissions.py", "file_ext": "py", "file_size_in_byte": 8941, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.test.SimpleTestCase", "line_number": 20, "usage_type": "name"}, {"api_name": "edc.lab.lab_profile.classes.site_lab_profiles.register", "line_number": 24, "usage_type": "call"}, {"api_name": "edc.lab.lab_profile.classes.site_lab_profiles", "line_number": 24, "usage_type": "name"}, {"api_name": "edc.testing.classes.TestLabProfile", "line_number": 24, "usage_type": "call"}, {"api_name": "edc.lab.lab_profile.exceptions.AlreadyRegistered", "line_number": 25, "usage_type": "name"}, {"api_name": "edc.testing.classes.TestAppConfiguration", "line_number": 28, "usage_type": "call"}, {"api_name": "edc.subject.lab_tracker.classes.site_lab_tracker.autodiscover", "line_number": 29, "usage_type": "call"}, {"api_name": "edc.subject.lab_tracker.classes.site_lab_tracker", "line_number": 29, "usage_type": "name"}, {"api_name": "edc.testing.classes.TestVisitSchedule", "line_number": 30, "usage_type": "call"}, {"api_name": "classes.Permissions", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 62, "usage_type": "name"}, {"api_name": "classes.Permissions", "line_number": 68, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 70, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 70, "usage_type": "name"}, {"api_name": "models.VisitDefinition.objects.filter", "line_number": 80, "usage_type": "call"}, {"api_name": "models.VisitDefinition.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "models.VisitDefinition", "line_number": 80, "usage_type": "name"}, {"api_name": "edc.subject.entry.models.Entry.objects.filter", "line_number": 81, "usage_type": "call"}, 
{"api_name": "edc.subject.entry.models.Entry.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "edc.subject.entry.models.Entry", "line_number": 81, "usage_type": "name"}, {"api_name": "classes.Permissions", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 84, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 84, "usage_type": "name"}, {"api_name": "models.VisitDefinition.objects.filter", "line_number": 93, "usage_type": "call"}, {"api_name": "models.VisitDefinition.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.VisitDefinition", "line_number": 93, "usage_type": "name"}, {"api_name": "edc.subject.entry.models.Entry.objects.filter", "line_number": 94, "usage_type": "call"}, {"api_name": "edc.subject.entry.models.Entry.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "edc.subject.entry.models.Entry", "line_number": 94, "usage_type": "name"}, {"api_name": "classes.Permissions", "line_number": 95, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 97, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 97, "usage_type": "name"}, {"api_name": "models.VisitDefinition.objects.filter", "line_number": 106, "usage_type": "call"}, {"api_name": "models.VisitDefinition.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.VisitDefinition", "line_number": 106, "usage_type": "name"}, {"api_name": "edc.subject.entry.models.Entry.objects.filter", "line_number": 107, "usage_type": "call"}, {"api_name": "edc.subject.entry.models.Entry.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "edc.subject.entry.models.Entry", "line_number": 107, "usage_type": "name"}, {"api_name": "classes.Permissions", "line_number": 108, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 110, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 110, "usage_type": "name"}, {"api_name": "models.VisitDefinition.objects.filter", "line_number": 119, "usage_type": "call"}, {"api_name": "models.VisitDefinition.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.VisitDefinition", "line_number": 119, "usage_type": "name"}, {"api_name": "edc.subject.entry.models.Entry.objects.filter", "line_number": 120, "usage_type": "call"}, {"api_name": "edc.subject.entry.models.Entry.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "edc.subject.entry.models.Entry", "line_number": 120, "usage_type": "name"}, {"api_name": "classes.Permissions", "line_number": 122, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 124, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 124, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 124, "usage_type": "name"}, {"api_name": "classes.Permissions", "line_number": 133, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.get", 
"line_number": 135, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 135, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 135, "usage_type": "name"}, {"api_name": "classes.Permissions", "line_number": 141, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 143, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 143, "usage_type": "name"}, {"api_name": "django.db.models.get_app", "line_number": 144, "usage_type": "call"}, {"api_name": "django.db.models.get_models", "line_number": 145, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 151, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 151, "usage_type": "name"}, {"api_name": "classes.Permissions", "line_number": 152, "usage_type": "call"}, {"api_name": "classes.Permissions", "line_number": 162, "usage_type": "argument"}]} +{"seq_id": "364966404", "text": "import sys\n\nimport numpy as np\nimport datetime as dt\n\nfrom config.config import config\nfrom lib import visualization, mysql_utils as mysql\n\n#DETECTOR_DATA_TABLE = \"detector_data_processed_2017_1\"\nDETECTOR_DATA_TABLE = \"detector_data_processed_2017\"\nDETECTOR_ID = \"608219\"\nDETECTOR_DATA_QUERY = \"SELECT DetectorID, Year, Month, Day, Time, Volume, Occupancy \\\n FROM {} AS DD NATURAL JOIN detector_health AS DH \\\n WHERE DetectorID = {} AND Health = 1 \\\n ORDER BY Year, Month, Day, Time;\"\n\n\ndef query_detector_data(cursor, table, detector_id, graph=False):\n query = DETECTOR_DATA_QUERY.format(table, detector_id)\n\n cursor = mysql.query(cursor, query)\n \n if cursor == None:\n return\n\n time = []\n volume = []\n occupancy = []\n #speed = []\n\n for row in cursor:\n d = dt.datetime(row[1], row[2], row[3], row[4] // 3600, (row[4] % 3600) // 60, row[4] % 60)\n time.append(d)\n\n volume.append(row[5])\n occupancy.append(row[6])\n #speed.append(row[7])\n\n time = np.array(time)\n volume = np.array(volume)\n occupancy = np.array(occupancy)\n occupancy_percentage = occupancy / 3600 * 100\n #speed = np.array(speed)\n\n if graph:\n visualization.plot_data_over_time(time, volume, title=\"Detector {} Volume 2017\".format(detector_id), ylabel=\"Volume (vph)\", figsize=(12, 5))\n visualization.plot_data_over_time(time, occupancy, title=\"Detector {} Occupancy 2017\".format(detector_id), ylabel=\"Occupancy (s)\", figsize=(12, 5))\n #visualization.plot_data_over_time(time, speed, title=\"Detector {} Speed 2017\".format(detector_id), ylabel=\"Speed\", figsize=(12, 5))\n visualization.plot_data_over_time(time, occupancy_percentage, title=\"Detector {} Occupancy 2017\".format(detector_id), ylabel=\"Occupancy (%)\", figsize=(12, 5))\n visualization.plot_fundamental_diagram(volume, occupancy_percentage, title=\"Detector {} Flow-Occupancy Diagram 2017\".format(detector_id))\n\n return time, volume, occupancy\n\n\n\nif __name__ == '__main__':\n cnx = mysql.connect_to_database(**config)\n\n if cnx == None:\n sys.exit()\n\n cursor = cnx.cursor()\n\n time, flow, occupancy = query_detector_data(cursor, DETECTOR_DATA_TABLE, DETECTOR_ID)\n\n cursor.close()\n cnx.close()\n", "sub_path": "old/main.py", "file_name": "main.py", 
"file_ext": "py", "file_size_in_byte": 2276, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "lib.mysql_utils.query", "line_number": 21, "usage_type": "call"}, {"api_name": "lib.mysql_utils", "line_number": 21, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "lib.visualization.plot_data_over_time", "line_number": 46, "usage_type": "call"}, {"api_name": "lib.visualization", "line_number": 46, "usage_type": "name"}, {"api_name": "lib.visualization.plot_data_over_time", "line_number": 47, "usage_type": "call"}, {"api_name": "lib.visualization", "line_number": 47, "usage_type": "name"}, {"api_name": "lib.visualization.plot_data_over_time", "line_number": 49, "usage_type": "call"}, {"api_name": "lib.visualization", "line_number": 49, "usage_type": "name"}, {"api_name": "lib.visualization.plot_fundamental_diagram", "line_number": 50, "usage_type": "call"}, {"api_name": "lib.visualization", "line_number": 50, "usage_type": "name"}, {"api_name": "lib.mysql_utils.connect_to_database", "line_number": 57, "usage_type": "call"}, {"api_name": "lib.mysql_utils", "line_number": 57, "usage_type": "name"}, {"api_name": "config.config.config", "line_number": 57, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "3731816", "text": "from app.models import Feedback\nfrom django import forms\nfrom django.db.models import fields\nfrom .models import Profile\nfrom django.forms.fields import IntegerField\nfrom .models import PurchaseModel, Purchase, ServiceRequest\n\nclass ProfileForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ('Full_Name', 'gender', 'mobile', 'pic','address')\n\nclass PurchaseForm(forms.ModelForm):\n class Meta:\n model = PurchaseModel\n fields = ('name','qty','total_amt')\n # widgets = {\n # 'qty':forms.HiddenInput(),\n # }\n\nclass PurchaseModel(forms.ModelForm):\n class Meta:\n model = Purchase\n fields = ('user','equipment','price')\n # widgets = {\n # 'qty':forms.HiddenInput(),\n # \n # }\nclass ServiceRequestForm(forms.ModelForm):\n class Meta:\n model = ServiceRequest\n fields = ('durations', 'gender', 'age', 'request_for')\n \n\nclass FeedbackForm(forms.ModelForm):\n \"\"\"Form definition for Feedback.\"\"\"\n\n class Meta:\n \"\"\"Meta definition for Feedbackform.\"\"\"\n\n model = Feedback\n fields = ('name','email','phone','message')\n ", "sub_path": "app/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1173, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.forms.ModelForm", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}, {"api_name": "models.Profile", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.fields", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 13, "usage_type": "name"}, {"api_name": "models.PurchaseModel", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.fields", "line_number": 16, "usage_type": "name"}, {"api_name": 
"django.forms.ModelForm", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 21, "usage_type": "name"}, {"api_name": "models.Purchase", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.fields", "line_number": 24, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 29, "usage_type": "name"}, {"api_name": "models.ServiceRequest", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.fields", "line_number": 32, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 35, "usage_type": "name"}, {"api_name": "app.models.Feedback", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.fields", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "573308650", "text": "import os\nimport sqlite3\nimport requests\nimport json\nfrom flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\napp.config.update(dict(\n DATABASE=os.path.join(app.root_path, 'gw2chars.db'),\n SECRET_KEY='bleh',\n USERNAME='mp',\n PASSWORD='mp'\n))\napp.config.from_envvar('FLASKR_SETTINGS', silent=True)\n\ndef connect_db():\n conn = sqlite3.connect(app.config['DATABASE'])\n conn.row_factory = sqlite3.Row\n return conn\n\ndef get_db():\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db\n\n@app.teardown_appcontext\ndef close_db(error):\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()\n\ndef init_db():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n@app.cli.command('initdb')\ndef initdb_command():\n init_db()\n print('Initialized Database')\n\n@app.route('/', methods=['GET', 'POST'])\ndef show_accounts():\n if request.method == 'POST':\n session['account'] = request.form.get('accounts')\n return redirect(url_for('show_characters'))\n db = get_db()\n cur = db.execute('SELECT ID, Account_ID, API_Key FROM Accounts')\n accs = cur.fetchall()\n return render_template('show_accounts.html', accounts=accs)\n\n@app.route('/account', methods=['GET'])\ndef show_characters():\n if 'account' in session:\n acc = session['account']\n db = get_db()\n cur = db.execute('SELECT ID, Name, Level, Race, Profession FROM Characters WHERE Account_ID = ? 
ORDER BY Name', [acc])\n chars = cur.fetchall()\n cur = db.execute('SELECT e.Character_ID, e.API_Equipment_ID, e.Slot, e.Name, e.Level, e.Rarity, e.Icon, e.Stats, st.StatType, (SELECT GROUP_CONCAT(eu.Name) FROM EquipmentUpgrades eu WHERE eu.Equipment_ID = e.ID) AS Upgrades FROM Equipment e INNER JOIN StatTypes st ON e.StatType_ID = st.Stat_ID AND st.Account_ID = e.Account_ID WHERE e.Account_ID = ?', [acc])\n equips = cur.fetchall()\n return render_template('show_characters.html', characters=chars, equipment=equips)\n else:\n return redirect(url_for('show_accounts'))\n \n\n@app.route('/refresh', methods=['POST'])\ndef refresh_from_api():\n if 'account' in session:\n acc = session['account']\n ak = RefreshCharacterInfo(acc)\n return redirect(url_for('show_characters'))\n else:\n return redirect(url_for('show_accounts'))\n\ndef CleanDatabase(accountid):\n db = get_db()\n db.execute(\"DELETE FROM Characters WHERE Account_ID = ?\", [accountid])\n db.execute(\"DELETE FROM Equipment WHERE Account_ID = ?\", [accountid])\n db.execute(\"DELETE FROM StatTypes WHERE Account_ID = ?\", [accountid])\n db.execute(\"DELETE FROM EquipmentUpgrades WHERE Account_ID = ?\", [accountid])\n db.commit()\n\ndef GetCharacters(apikey):\n chars = requests.get(url='https://api.guildwars2.com/v2/characters?access_token=' + apikey).json()\n return chars\n\ndef GetCharacterInfo(apikey, characterName):\n r = requests.get(url='https://api.guildwars2.com/v2/characters/' + characterName + '?access_token=' + apikey)\n j = json.loads(r.text)\n characterinfo = dict(race=j['race'],profession=j['profession'],level=j['level'],equipment=j['equipment'],created=j['created'])\n return characterinfo\n\ndef GetEquipmentInfo(equipmentIDs):\n r = requests.get(url='https://api.guildwars2.com/v2/items?ids=' + equipmentIDs)\n j = json.loads(r.text)\n return j\n\ndef GetEquipmentStatType(statIDs):\n r = requests.get(url='https://api.guildwars2.com/v2/itemstats?ids=' + statIDs)\n j = json.loads(r.text)\n return j\n\ndef GetUpgradeInfo(upgradeIDs):\n r = requests.get(url='https://api.guildwars2.com/v2/items?ids=' + upgradeIDs)\n j = json.loads(r.text)\n return j\n\ndef GetSkinInfo(skinIDs):\n r = requests.get(url='https://api.guildwars2.com/v2/skins?ids=' + skinIDs)\n j = json.loads(r.text)\n return j\n\ndef RefreshCharacterInfo(accountid):\n CleanDatabase(accountid)\n db = get_db()\n cur = db.execute('SELECT API_Key FROM Accounts WHERE ID = ?', [accountid])\n apikey = cur.fetchone()[0]\n \n gw2characters = GetCharacters(apikey)\n\n for gw2character in gw2characters:\n gw2characterinfo = GetCharacterInfo(apikey, gw2character)\n \n character_id = 0\n \n cur = db.cursor()\n cur.execute(\"INSERT INTO Characters (Account_ID, Name, Level, Race, Profession) VALUES (?, ?, ?, ?, ?)\", [accountid, gw2character, gw2characterinfo['level'], gw2characterinfo['race'], gw2characterinfo['profession']])\n character_id = cur.lastrowid\n db.commit()\n equipment_ids = ''\n\n for equipment in gw2characterinfo['equipment']:\n equipment_ids = equipment_ids + str(equipment['id']) + ','\n \n cur.execute(\"INSERT INTO Equipment (Account_ID, Character_ID, API_Equipment_ID, Slot) VALUES (?, ?, ?, ?)\", [accountid, character_id, str(equipment['id']), str(equipment['slot'])])\n last_equipment_id = cur.lastrowid\n db.commit()\n\n if \"stats\" in equipment:\n statraw = equipment['stats']['attributes']\n statformatted = ', '.join(\"{!s} {!r}\".format(key, val) for (key,val) in statraw.items())\n db.execute(\"UPDATE Equipment SET Stats = ?, StatType_ID = ? WHERE Account_ID = ? 
AND Character_ID = ? AND API_Equipment_ID = ?\", [statformatted, str(equipment['stats']['id']), accountid, character_id, str(equipment['id'])])\n\n if \"upgrades\" in equipment:\n for upgrade in equipment['upgrades']:\n db.execute(\"INSERT INTO EquipmentUpgrades (Account_ID, API_Equipment_ID, Equipment_ID, Upgrade_ID) VALUES (?, ?, ?, ?)\", [accountid, str(equipment['id']), last_equipment_id, str(upgrade)])\n \n if \"skin\" in equipment:\n db.execute(\"UPDATE Equipment SET Skin_ID = ? WHERE ID = ?\", [str(equipment['skin']), str(last_equipment_id)])\n\n db.commit()\n\n equipmentdetails = GetEquipmentInfo(equipment_ids[:-1])\n\n for equipitem in equipmentdetails:\n if (equipitem['type'] != 'Gathering'):\n if(\"details\" in equipitem and \"infix_upgrade\" in equipitem['details']):\n statraw = equipitem['details']['infix_upgrade']['attributes']\n statformatted = ''\n for stat in statraw:\n stattemp = ' '.join(\"{}\".format(val) for (key,val) in stat.items())\n statformatted = statformatted + stattemp + ', '\n \n db.execute(\"UPDATE Equipment SET NAME = ?, LEVEL = ?, Rarity = ?, ICON = ?, STATS = ?, StatType_ID = ? WHERE CHARACTER_ID = ? AND API_Equipment_ID = ?\", [equipitem['name'],equipitem['level'], equipitem['rarity'], equipitem['icon'], statformatted[:-2], equipitem['details']['infix_upgrade']['id'], character_id, equipitem['id']])\n else:\n db.execute(\"UPDATE Equipment SET NAME = ?, LEVEL = ?, Rarity = ?, ICON = ? WHERE CHARACTER_ID = ? AND API_Equipment_ID = ?\", [equipitem['name'],equipitem['level'], equipitem['rarity'], equipitem['icon'], character_id, equipitem['id']])\n db.commit()\n\n cur = db.execute('SELECT GROUP_CONCAT(DISTINCT StatType_ID) FROM Equipment WHERE Account_ID = ?', [accountid])\n statTypeIDs = cur.fetchone()[0]\n\n stattypes = GetEquipmentStatType(statTypeIDs)\n\n for stattype in stattypes:\n db.execute(\"INSERT INTO StatTypes (Account_ID, Stat_ID, StatType) VALUES (?, ?, ?)\", [accountid, stattype['id'], stattype['name']])\n db.commit()\n\n cur = db.execute('SELECT GROUP_CONCAT(DISTINCT Upgrade_ID) FROM EquipmentUpgrades WHERE Account_ID = ?', [accountid])\n upgradeIDs = cur.fetchone()[0]\n\n upgrades = GetUpgradeInfo(upgradeIDs)\n\n for upgradeinfo in upgrades:\n db.execute(\"UPDATE EquipmentUpgrades SET Name = ? WHERE Account_ID = ? AND Upgrade_ID = ?\", [upgradeinfo['name'], accountid, upgradeinfo['id']])\n db.commit()\n\n cur = db.execute('SELECT GROUP_CONCAT(DISTINCT Skin_ID) FROM Equipment WHERE Skin_ID IS NOT NULL AND Account_ID = ?', [accountid])\n skinIDs = cur.fetchone()[0]\n\n skins = GetSkinInfo(skinIDs)\n\n for skin in skins:\n db.execute('UPDATE Equipment SET Name = ?, Icon = ? 
WHERE Skin_ID = ?', [skin['name'], skin['icon'], skin['id']])\n db.commit()\n\n\nif __name__ == '__main__':\n app.run()", "sub_path": "gw2chars/gw2chars.py", "file_name": "gw2chars.py", "file_ext": "py", "file_size_in_byte": 8523, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 24, "usage_type": "argument"}, {"api_name": "flask.g.sqlite_db", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.g.sqlite_db", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.g", "line_number": 30, "usage_type": "argument"}, {"api_name": "flask.g.sqlite_db.close", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.g.sqlite_db", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 75, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 86, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 90, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 91, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 96, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 97, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 101, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 102, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 106, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 107, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 111, "usage_type": 
"call"}, {"api_name": "json.loads", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "257921062", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 24 23:19:28 2018\r\n\r\n@author: Dimple Shah\r\n\"\"\"\r\n\r\nimport surprise\r\nimport numpy as np\r\nimport pandas as pd\r\nimport sklearn.preprocessing as sk\r\nfrom surprise import Dataset\r\nfrom surprise import Reader\r\n#from memory_profiler import profile\r\n\r\nimport os\r\nimport psutil\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nstart = time.time()\r\n\r\nprocess = psutil.Process(os.getpid())\r\nm1=process.memory_full_info().uss\r\n\r\nx=[]\r\ntimex=[]\r\nmem=[]\r\n\r\nclass MatrixFacto(surprise.AlgoBase):\r\n '''A basic rating prediction algorithm based on matrix factorization.'''\r\n \r\n def __init__(self, learning_rate, n_epochs, n_factors):\r\n \r\n self.lr = learning_rate # learning rate for SGD\r\n self.n_epochs = n_epochs # number of iterations of SGD\r\n self.n_factors = n_factors # number of factors\r\n self.skip_train = False\r\n \r\n def train(self, trainset):\r\n '''Learn the vectors p_u and q_i with SGD'''\r\n \r\n print('Fitting data with SGD...')\r\n \r\n # Randomly initialize the user and item factors.\r\n p = np.random.normal(0, .1, (trainset.n_users, self.n_factors))\r\n q = np.random.normal(0, .1, (trainset.n_items, self.n_factors))\r\n \r\n # SGD procedure\r\n for _ in range(self.n_epochs):\r\n for u, i, r_ui in trainset.all_ratings():\r\n err = r_ui - np.dot(p[u], q[i])\r\n # Update vectors p_u and q_i\r\n p[u] += self.lr * err * q[i]\r\n q[i] += self.lr * err * p[u]\r\n # Note: in the update of q_i, we should actually use the previous (non-updated) value of p_u.\r\n # In practice it makes almost no difference.\r\n \r\n self.p, self.q = p, q\r\n self.trainset = trainset\r\n\r\n def estimate(self, u, i):\r\n '''Return the estmimated rating of user u for item i.'''\r\n \r\n # return scalar product between p_u and q_i if user and item are known,\r\n # else return the average of all ratings\r\n if self.trainset.knows_user(u) and self.trainset.knows_item(i):\r\n return np.dot(self.p[u], self.q[i])\r\n else:\r\n return self.trainset.global_mean\r\n\r\n\r\nreader = Reader(rating_scale=(0, 1))\r\n\r\n\r\n#Checking RMSE with 300k data records\r\n\r\nratings_list1 = [i.strip().split(\",\") for i in open('C:\\\\Users\\\\Dimple Shah\\\\Desktop\\\\mtech\\\\reco\\\\3l.csv', 'r').readlines()]\r\nratings_df1 = pd.DataFrame(ratings_list1, columns = ['UserID', 'BookID', 'Rating'], dtype = float)\r\n\r\nratings_df1.loc[:,'Rating'] = sk.minmax_scale( ratings_df1.loc[:,'Rating'] )\r\n\r\n\r\ndata1 = Dataset.load_from_df(ratings_df1[['UserID', 'BookID', 'Rating']], reader)\r\n\r\ndata1.split(2) # split data for 2-folds cross validation\r\nalgo1 = MatrixFacto(learning_rate=.01, n_epochs=10, n_factors=10)#print(algo)\r\n#test_rms=\r\nresult1=surprise.evaluate(algo1, data1, measures=['RMSE'])#print(test_rms)\r\nx.append(np.mean(result1['RMSE']))\r\nend = time.time()\r\n#print(\"Time1\",end - start)\r\ntimex.append(end-start)\r\nprocess=psutil.Process(os.getpid())\r\nm2=process.memory_full_info().uss\r\n#m2=m2-m1\r\nprint(m2)\r\nmem.append(m2)\r\n\r\n\r\n#Checking RMSE with 500k data records\r\n\r\n\r\nstart = time.time()\r\nratings_list2 = [i.strip().split(\",\") for i in open('C:\\\\Users\\\\Dimple Shah\\\\Desktop\\\\mtech\\\\reco\\\\5l.csv', 'r').readlines()]\r\nratings_df2 = pd.DataFrame(ratings_list2, columns = ['UserID', 'BookID', 'Rating'], dtype = float)\r\n\r\nratings_df2.loc[:,'Rating'] = 
sk.minmax_scale( ratings_df2.loc[:,'Rating'] )\r\n\r\n\r\ndata2 = Dataset.load_from_df(ratings_df2[['UserID', 'BookID', 'Rating']], reader)\r\n\r\ndata2.split(2) # split data for 2-folds cross validation\r\nalgo2 = MatrixFacto(learning_rate=.01, n_epochs=10, n_factors=10)#print(algo)\r\nresult2= surprise.evaluate(algo2, data2, measures=['RMSE'])#print(test_rms)\r\nx.append(np.mean(result2['RMSE']))\r\nend = time.time()\r\n#print(\"Time2\",end - start)\r\ntimex.append(end-start)\r\nprocess=psutil.Process(os.getpid())\r\nm3=process.memory_full_info().uss\r\n#m3=m3-m1\r\nprint(m3)\r\nmem.append(m3)\r\n\r\n\r\n#Checking RMSE with 600k data records\r\n\r\n\r\nstart = time.time()\r\nratings_list3 = [i.strip().split(\",\") for i in open('C:\\\\Users\\\\Dimple Shah\\\\Desktop\\\\mtech\\\\reco\\\\6l.csv', 'r').readlines()]\r\nratings_df3 = pd.DataFrame(ratings_list3, columns = ['UserID', 'BookID', 'Rating'], dtype = float)\r\n\r\nratings_df3.loc[:,'Rating'] = sk.minmax_scale( ratings_df3.loc[:,'Rating'] )\r\n\r\n\r\ndata3 = Dataset.load_from_df(ratings_df3[['UserID', 'BookID', 'Rating']], reader)\r\n\r\ndata3.split(2) # split data for 2-folds cross validation\r\nalgo3 = MatrixFacto(learning_rate=.01, n_epochs=10, n_factors=10)#print(algo)\r\nresult3= surprise.evaluate(algo3, data3, measures=['RMSE'])#print(test_rms)\r\nx.append(np.mean(result3['RMSE']))\r\nend = time.time()\r\n#print(\"Time3\",end - start)\r\ntimex.append(end-start)\r\nprocess=psutil.Process(os.getpid())\r\nm4=process.memory_full_info().uss\r\n#m4=m4-m1\r\nprint(m4)\r\nmem.append(m4)\r\n\r\n\r\n#plotting graph for the time taken for different number of records\r\n\r\ny = [len(ratings_list1),len(ratings_list2),len(ratings_list3)]\r\nplt.plot( timex[0],y[0],'ro',label='300000 records')\r\nplt.plot( timex[1], y[1],'bo',label='500000 records')\r\nplt.plot( timex[2],y[2],'go',label='600000 records')\r\nlegend = plt.legend(loc='upper left')\r\nframe = legend.get_frame()\r\nplt.xlabel('Time(in sec)')\r\nplt.ylabel('Number of records')\r\nplt.title('Time Vs Number of Records')\r\nplt.legend()\r\nplt.show()\r\n\r\n#plotting graph for the RMSE for different number of records\r\n\r\ny = [len(ratings_list1),len(ratings_list2),len(ratings_list3)]\r\nplt.plot( x[0],y[0],'gs',label='300000 records')\r\nplt.plot( x[1],y[1],'rs',label='500000 records')\r\nplt.plot( x[2],y[2],'bs',label='600000 records')\r\nlegend = plt.legend(loc='upper left')\r\nframe = legend.get_frame()\r\nplt.xlabel('Mean RMSE')\r\nplt.ylabel('Number of records')\r\nplt.title('Mean RMSE Vs Number of Records')\r\nplt.legend()\r\n\r\nplt.show()\r\n\r\n\r\n#plotting graph for the memory space taken for different number of records\r\n\r\ny = [len(ratings_list1),len(ratings_list2),len(ratings_list3)]\r\nplt.plot( mem[0],y[0],'g^',label='30000 records')\r\nplt.plot( mem[1],y[1],'r^',label='500000 records')\r\nplt.plot( mem[2],y[2],'b^',label='600000 records')\r\nlegend = plt.legend(loc='upper left')\r\nframe = legend.get_frame()\r\nplt.xlabel('Memory Usage')\r\nplt.ylabel('Number of records')\r\nplt.title('Memory Usage Vs Number of Records')\r\nplt.legend()\r\n\r\nplt.show()", "sub_path": "201711001_Dimple/Assignment_5/Surprise_with_ratings_SGD.py", "file_name": "Surprise_with_ratings_SGD.py", "file_ext": "py", "file_size_in_byte": 6446, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "time.time", "line_number": 20, "usage_type": "call"}, {"api_name": "psutil.Process", "line_number": 22, "usage_type": "call"}, 
{"api_name": "os.getpid", "line_number": 22, "usage_type": "call"}, {"api_name": "surprise.AlgoBase", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 67, "usage_type": "call"}, {"api_name": "surprise.Reader", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.minmax_scale", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 80, "usage_type": "name"}, {"api_name": "surprise.Dataset.load_from_df", "line_number": 83, "usage_type": "call"}, {"api_name": "surprise.Dataset", "line_number": 83, "usage_type": "name"}, {"api_name": "surprise.evaluate", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 89, "usage_type": "call"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}, {"api_name": "psutil.Process", "line_number": 93, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 93, "usage_type": "call"}, {"api_name": "time.time", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 105, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.minmax_scale", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 107, "usage_type": "name"}, {"api_name": "surprise.Dataset.load_from_df", "line_number": 110, "usage_type": "call"}, {"api_name": "surprise.Dataset", "line_number": 110, "usage_type": "name"}, {"api_name": "surprise.evaluate", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 115, "usage_type": "call"}, {"api_name": "time.time", "line_number": 116, "usage_type": "call"}, {"api_name": "psutil.Process", "line_number": 119, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 119, "usage_type": "call"}, {"api_name": "time.time", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.minmax_scale", "line_number": 133, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 133, "usage_type": "name"}, {"api_name": "surprise.Dataset.load_from_df", "line_number": 136, "usage_type": "call"}, {"api_name": "surprise.Dataset", "line_number": 136, "usage_type": "name"}, {"api_name": "surprise.evaluate", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 141, "usage_type": "call"}, {"api_name": "time.time", "line_number": 142, "usage_type": "call"}, {"api_name": "psutil.Process", "line_number": 145, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}]} +{"seq_id": "336644185", "text": "import requests\nimport random\nimport json\nfrom flask import Flask, request\nimport os\nfrom RandomPicker import Picker\n#from random_picker.app.RandomPicker import Picker\n\nlisten_port = os.environ.get(\"LISTEN_PORT\", 5005)\ndb_port = os.environ.get(\"API_PORT\", 5001)\nmongo_host = os.environ.get(\"API_DB_HOST\", \"52.255.160.180\")\nqueue_size = os.environ.get(\"QUEUE_SIZE\", 1)\ndb_host = \"http://{0}:{1}\".format(mongo_host, db_port)\n\napp = Flask(__name__)\nthe_picker = Picker(db_host = db_host, queue_size = queue_size)\n\n\n@app.route('/get-server', methods=['POST'])\ndef get_server():\n json_object = request.get_json()\n try:\n group = json_object[\"group\"]\n print(group)\n print(the_picker.get_server(group))\n except KeyError:\n return \"group is a required parameter\", 400\n return the_picker.get_server(group), 200\n\n@app.route('/get-fault', methods=['POST'])\ndef get_fault():\n json_object = request.get_json()\n\n return the_picker.get_fault(json_object), 200\n\n@app.route('/pick-data',methods=['GET'])\ndef get_picker_data():\n return the_picker.show_data()\n\n@app.route('/test',methods=['GET'])\ndef test():\n return \"hello world\"\n\nif __name__ == '__main__':\n app.run(port=listen_port, host='0.0.0.0', debug=True)\n", "sub_path": "app/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1248, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "RandomPicker.Picker", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "18930472", "text": "\nimport random\nimport argparse\nimport re\nimport os.path as path\nimport sys\nimport numpy as np\nimport torch\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import datasets, transforms\nfrom nltk.translate.bleu_score import sentence_bleu\nimport model2 as model\nfrom PIL import Image\nimport data_utils\n\n\n\n## 固定変数の宣言\n\n# ミニバッチサイズを定義\nbatch_size = 32\n\n# 学習データセット数(画像と文章のセット数)を定義\ntraining_data_number = 960\n\n##vocabulary = 22635\nvocabulary = 1083\n\n# 学習率を定義\nlearning_rate = 1e-7\n\n# 学習回数(エポック数を定義)\nepochs = 201\n\n# causal 
畳み込みネットワークの層数を定義\ndecoder_layers_number = 6\n\n# 単語ベクトルの要素数を定義\nword_embedding_size = 300\n\n# バリデーションのデータセット数(画像と文章のセット数)を定義\nvalidation_data_number = 10\n\n#計算環境の指定\ndevice = torch.device('cuda' if torch.cuda.is_available else 'cpu')\n\n\n\n# この関数をメイン関数として実行\ndef main():\n\n # 逆引き、正引きの単語辞書を作成\n stoi, itos = data_utils.get_dicts()\n\n # 単語辞書の大きさ、単語のベクトルの大きさなどを指定してインスタンスを生成\n cnn_cnn = model.CNN_CNN_CE(len(stoi), word_embedding_size, n_layers=decoder_layers_number, train_cnn=True)\n \n # インスタンス変数のデータをCPUメモリからGPUメモリ上に移動\n cnn_cnn = cnn_cnn.to(device)\n\n # エポックを指定して学習済みモデルのパラメータをロード\n ##cnn_cnn.load(90)\n\n # ネットワークの重みを更新するアルゴリズムを定義\n optimizer = optim.Adam([\n {'params': cnn_cnn.language_module.parameters()},\n {'params': cnn_cnn.prediction_module.parameters()},\n {'params': cnn_cnn.attention_module.parameters()},\n ], lr=1e-4, weight_decay=0.1e-5)\n\n \n # 損失関数:予測する要素(単語)としてのクロスエントロピー\n criterion = torch.nn.NLLLoss()\n\n \n ## -------------- ニューラルネットワークの学習----------------------\n \n # BLEU-SCOREを格納するリストを宣言\n bleu_score = []\n \n \n # 指定したエポック数だけ繰り返し処理を実行\n for e in range(epochs):\n \n # ジェネレータ関数からイテレータを作成\n trainloader = data_utils.training_dataloader(training_data_number, batch_size, stoi)\n \n \n # バッチサイズ分の繰り返し処理(イテレータからデータを取り出していく)\n for batch, (images, captions) in enumerate(trainloader):\n\n # 画像キャプションの学習データ/教師データを作成\n train_indices_v, expected_indices_v, train_labels, max_length= data_utils.make_train_caption_datasets(stoi, vocabulary, captions)\n \n # 学習用の画像と画像キャプションデータをCPUメモリ上からGPUメモリ上に移動\n images_v = images.to(device)\n train_indices_v = train_indices_v.to(device)\n expected_indices_v = expected_indices_v.to(device)\n \n # 正解データの画像キャプションの単語IDのインデックスを格納するためのリスト\n valid_training_indices = []\n \n \n # バッチサイズ分の学習用画像キャプションの単語に、順番に1から総単語数の番号を振る\n for i, label in enumerate(train_labels):\n \n # 各キャプションの単語数分リスト内で繰り返し処理で、インデックスのみ取り出す\n valid_training_indices = valid_training_indices + [j for j in range(i*(max_length), i*(max_length) + len(label))]\n ##print('valid_training_indices', valid_training_indices)\n\n \n # view(-1)で行列からベクトルに変換し、上で作成したインデックスで指定した位置の要素を取り出す\n valid_expected_v = expected_indices_v.view(-1)[valid_training_indices]\n ##print('\\n expected_indices_v',expected_indices_v)\n ##print('\\n valid_expected_v',valid_expected_v)\n\n # Limpia los optimizadores\n optimizer.zero_grad()\n\n # Calcula las predicciones\n outputs_v = cnn_cnn(images_v, train_indices_v)\n\n # 生成された画像キャプションのサイズを変更\n outputs_v = outputs_v.view(-1, cnn_cnn.vocab_size)\n\n # 予測/正解キャプションを損失関数の引数にして、誤差を算出\n loss = criterion(outputs_v[valid_training_indices], valid_expected_v)\n\n # ニューラルネットワークの誤差逆伝播を実行\n loss.backward()\n\n # Clip gradients\n torch.nn.utils.clip_grad_norm_(cnn_cnn.parameters(), 1.0)\n\n # 誤差逆伝播勾配をもとにニューラルネットワークのパラメータを更新\n optimizer.step()\n \n \n # BLUEスコアをバリデーションの度に初期化\n bleu_score_epoch = 0\n\n # バリデーション用の画像/画像キャプションをロード\n images_for_validation, captions_for_validation = data_utils.validation_dataloader(stoi)\n\n\n # バリデーションデータ数回繰り返し処理\n for i in range(validation_data_number):\n\n # 検証用の画像を選択\n img = images_for_validation[i]\n \n # 変数データをCPUメモリからGPUメモリに移動\n img = img.to(device)\n \n # 画像のサイズを(3, 224, 224)から(1, 3, 224, 224)に変換\n img = img.view(1, *img.shape)\n \n # 検証用の画像でキャプションを予測\n sentence = cnn_cnn.sample(img, stoi, itos)[0]\n\n # 正解画像キャプションを選択\n captions = captions_for_validation[i]\n\n print('\\nPredicted image caption: ',sentence)\n print('Correct image caption: ',captions)\n\n # sentence_bleuを計算\n bleu_score_epoch += sentence_bleu(captions, sentence, weights=(0.25, 0.25, 0.25, 
0.25))\n\n \n # 検証用データでのBLEUスコアの平均値を算出\n bleu_score.append(bleu_score_epoch / validation_data_number)\n\n \n # blue_scoreに最大値のものが格納されたら、その時のモデルを保存する\n if(e%10==0 or (bleu_score[-1] == max(bleu_score))):\n\n # モデルを保存\n cnn_cnn.save(e)\n \n \n \n# メインモジュールとしてこのファイルを実行\nif __name__ == \"__main__\":\n \n # このファイルの上記に定義したmain関数を実行\n main()\n", "sub_path": "train_cnn_cnn_book-english.py", "file_name": "train_cnn_cnn_book-english.py", "file_ext": "py", "file_size_in_byte": 7078, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "torch.device", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 47, "usage_type": "attribute"}, {"api_name": "data_utils.get_dicts", "line_number": 55, "usage_type": "call"}, {"api_name": "model2.CNN_CNN_CE", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "attribute"}, {"api_name": "data_utils.training_dataloader", "line_number": 88, "usage_type": "call"}, {"api_name": "data_utils.make_train_caption_datasets", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 135, "usage_type": "attribute"}, {"api_name": "data_utils.validation_dataloader", "line_number": 145, "usage_type": "call"}, {"api_name": "nltk.translate.bleu_score.sentence_bleu", "line_number": 170, "usage_type": "call"}]} +{"seq_id": "402226183", "text": "#!/usr/bin/env python\n\nimport re\nimport argparse\nimport json, urllib, urllib2, ConfigParser, os, sys\nfrom datetime import datetime\n\nhttp_errors = r'HTTP Error ([45]0[02])'\nrequest_data = r'\\s*request: (\\{.*)$'\n\nhttp_re = re.compile(http_errors)\nrequest_re = re.compile(request_data)\n\ndef request_generator(instream):\n message_on_next = False\n code = 200\n for line in instream:\n if message_on_next:\n message_on_next = False\n m = request_re.match(line)\n request = {}\n if m:\n try:\n request = json.loads(m.group(1))\n except ValueError:\n pass\n yield code, request\n\n m = http_re.match(line)\n if m:\n code = int(m.group(1))\n message_on_next = True\n\nif __name__ == '__main__':\n sys.stderr.write(\"entering __main__\\n\")\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\",\"--site\", default='test1', help=\"The site id to post to, e.g. 'test1'\")\n parser.add_argument(\"-i\",\"--instrument\", help=\"The instrument to post to, e.g. 
'sonde'\")\n\n host = 'http://localhost:8080'\n endpoint = '/observations'\n\n args = parser.parse_args()\n\n if not args.instrument:\n sys.stderr.write(\"Error: instrument argument is required\")\n\n sys.stderr.write(\"about to read lines\\n\")\n for code, request in request_generator(sys.stdin):\n url = urllib2.Request(host + urllib2.quote('/sites/' + args.site + '/instruments/' \\\n + args.instrument + endpoint), json.dumps(request, indent=4),\n {'Content-Type': 'application/json'})\n try:\n response = urllib2.urlopen(url)\n except urllib2.HTTPError as e:\n if not e.code == 409:\n sys.stderr.write(\"{}\\n\\trequest: {}\\n\".format(e, json.dumps(request)))\n response = None\n else:\n sys.stderr.write(\"conflict, probably due to duplicate data\\n\")\n except urllib2.URLError as e:\n sys.stderr.write(\"{}\\n\\turl: {}\\n\".format(e, url.get_full_url()))\n response = None\n\n\n", "sub_path": "tests/retrylogs.py", "file_name": "retrylogs.py", "file_ext": "py", "file_size_in_byte": 2192, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "re.compile", "line_number": 11, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 35, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 47, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sys.stdin", "line_number": 50, "usage_type": "attribute"}, {"api_name": "urllib2.Request", "line_number": 51, "usage_type": "call"}, {"api_name": "urllib2.quote", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 52, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 55, "usage_type": "call"}, {"api_name": "urllib2.HTTPError", "line_number": 56, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 58, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 61, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 61, "usage_type": "attribute"}, {"api_name": "urllib2.URLError", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 63, "usage_type": "attribute"}]} +{"seq_id": "232842904", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport re\nheaders = requests.utils.default_headers()\nheaders.update({ 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'})\na = 0\ninternalLinks = []\nexternalLinks = []\nbaseUrl = 'https://www.joomshaper.com'\ndef link_spyder(u): # Define function\n global a\n global internalLinks\n global externalLinks\n print(\"Fatching url : \",u)\n req = requests.get(u, headers) # Perse Given link\n soup = BeautifulSoup(req.content, 'html.parser') # Perse as HTML content\n anchors = soup.find_all('a') # Perse all anchor tags\n for link in anchors: 
# Perse links of ref\n ref = link.get('href')\n # print(\"ref=========\",ref)\n if ref != '#' and ref!= None and ref != '/' and ref != '/index.php' and not re.compile(r'{}|\\(|\\)|\\#'.format(u)).search(ref): # Filter ref\n if re.compile(r'http').search(ref):\n externalLinks.append(ref)\n elif baseUrl+ref not in internalLinks:\n gotInternalUrl = baseUrl+ref\n internalLinks.append(gotInternalUrl)\n print(\"gotInternalUrl\",gotInternalUrl)\n a += 1\n print(a, len(internalLinks))\n print(str(internalLinks[a-1]))\n print('---')\n link_spyder(str(internalLinks[a-1]))\nlink_spyder(baseUrl)", "sub_path": "scraping_request_mod.py", "file_name": "scraping_request_mod.py", "file_ext": "py", "file_size_in_byte": 1325, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.utils.default_headers", "line_number": 4, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 4, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 21, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "355763302", "text": "import itertools\r\n\r\ndef sumtozero(somelist):\r\n somelist=list(itertools.permutations(somelist, r=3))\r\n resultlist=[]\r\n for (x,y,z) in somelist:\r\n\t#print(x,y,z)\r\n if x+y+z==0:\r\n resultlist.append(list([x,y,z]))\r\n\r\n #resultlist=list(sorted(resultlist))\r\n #print(resultlist)\r\n \r\n newandfew=[]\r\n x=[]\r\n for each in resultlist:\r\n x=list(sorted(each))\r\n\r\n if x not in newandfew:\r\n newandfew.append(x)\r\n \r\n\r\n #sorted(newandfew, key=lambda x:(x[2], -x[1]))\r\n print(newandfew)\r\n return(None)\r\n \r\n \r\n\r\n\r\n", "sub_path": "Day2/q3modified.py", "file_name": "q3modified.py", "file_ext": "py", "file_size_in_byte": 593, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "itertools.permutations", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "448193150", "text": "import enum\nimport time\nimport heapq\nimport asyncio\nimport logging\nimport calendar\nimport datetime\nimport functools\nimport itertools\nimport collections\nfrom datetime import timezone as tz\nfrom collections.abc import Iterable, Mapping\n\nimport synapse.exc as s_exc\nimport synapse.common as s_common\n\nimport synapse.lib.base as s_base\nimport synapse.lib.coro as s_coro\nimport synapse.lib.config as s_config\nimport synapse.lib.provenance as s_provenance\n\n# Agenda: manages running one-shot and periodic tasks in the future (\"appointments\")\n\nlogger = logging.getLogger(__name__)\n\nreqValidCdef = s_config.getJsValidator({\n 'type': 'object',\n 'properties': {\n 'storm': {'type': 'string'},\n 'creator': {'type': 'string', 'pattern': s_config.re_iden},\n 'iden': {'type': 'string', 'pattern': s_config.re_iden},\n 'view': {'type': 'string', 'pattern': s_config.re_iden},\n 'name': {'type': 'string'},\n 'doc': {'type': 'string'},\n 'incunit': {\n 'oneOf': [\n {'type': 'null'},\n {'enum': ['year', 'month', 'dayofmonth', 'dayofweek', 'day', 'hour', 'minute']}\n ]\n },\n 'incvals': {\n 'type': ['array', 'number', 'null'],\n 'items': {'type': 'number'}\n },\n 'reqs': {\n 'oneOf': [\n {\n '$ref': '#/definitions/req',\n },\n {\n 'type': ['array'],\n 'items': {'$ref': '#/definitions/req'},\n },\n ]\n },\n },\n 
'additionalProperties': False,\n    'required': ['creator', 'storm'],\n    'dependencies': {\n        'incvals': ['incunit'],\n        'incunit': ['incvals'],\n    },\n    'definitions': {\n        'req': {\n            'type': 'object',\n            'properties': {\n                'minute': {'oneOf': [{'type': 'number'}, {'type': 'array', 'items': {'type': 'number'}}]},\n                'hour': {'oneOf': [{'type': 'number'}, {'type': 'array', 'items': {'type': 'number'}}]},\n                'dayofmonth': {'oneOf': [{'type': 'number'}, {'type': 'array', 'items': {'type': 'number'}}]},\n                'month': {'oneOf': [{'type': 'number'}, {'type': 'array', 'items': {'type': 'number'}}]},\n                'year': {'oneOf': [{'type': 'number'}, {'type': 'array', 'items': {'type': 'number'}}]},\n            }\n        }\n    }\n})\n\ndef _dayofmonth(hardday, month, year):\n    '''\n    Returns a valid day of the month given the desired value.\n\n    Negative values are interpreted as offset backwards from the last day of the month, with -1 representing the\n    last day of the month. Out-of-range values are clamped to the first or last day of the month.\n    '''\n    newday = hardday\n    daysinmonth = calendar.monthrange(year, month)[1]\n    if newday < 0:\n        newday = daysinmonth + hardday + 1\n    newday = max(1, min(newday, daysinmonth))\n    return newday\n\nclass TimeUnit(enum.IntEnum):\n    '''\n    Unit of time that recurring and required parts of appointments are made of\n    '''\n    YEAR = enum.auto()\n    MONTH = enum.auto()\n    DAYOFMONTH = enum.auto()  # e.g. every 3rd of the month\n    DAYOFWEEK = enum.auto()  # e.g. every Tuesday (Mon=0, Sun=6)\n    DAY = enum.auto()  # every day\n    HOUR = enum.auto()\n    MINUTE = enum.auto()\n    NOW = enum.auto()\n\n    @classmethod\n    def fromString(cls, s):\n        return cls.__members__[s.upper()]\n\n# Next largest unit for each unit\n_NextUnitMap = {\n    TimeUnit.YEAR: None,\n    TimeUnit.MONTH: TimeUnit.YEAR,\n    TimeUnit.HOUR: TimeUnit.DAY,\n    TimeUnit.MINUTE: TimeUnit.HOUR,\n    TimeUnit.DAYOFMONTH: TimeUnit.MONTH,\n    TimeUnit.DAYOFWEEK: TimeUnit.DAYOFWEEK,\n}\n\n# Unit equivalence to datetime arguments\n_TimeunitToDatetime = {\n    TimeUnit.YEAR: 'year',\n    TimeUnit.MONTH: 'month',\n    TimeUnit.DAYOFMONTH: 'day',\n    TimeUnit.DAYOFWEEK: 'day',\n    TimeUnit.DAY: 'day',\n    TimeUnit.HOUR: 'hour',\n    TimeUnit.MINUTE: 'minute',\n}\n\n# The valid ranges for required and recurring\n_UnitBounds = {\n    TimeUnit.YEAR: ((2000, 2999), (1, 5)),\n    TimeUnit.MONTH: ((1, 12), (1, 60)),\n    TimeUnit.DAYOFMONTH: ((-31, 31), (-31, 31)),\n    TimeUnit.DAYOFWEEK: ((0, 6), (0, 6)),\n    TimeUnit.DAY: ((0, 0), (1, 365 * 5)),\n    TimeUnit.HOUR: ((0, 23), (1, 1000)),\n    TimeUnit.MINUTE: ((0, 59), (1, 60 * 60 * 24 * 30))\n}\n\nclass ApptRec:\n    '''\n    Represents a single element of a single combination of an appointment\n    '''\n    def __init__(self, reqdict, incunit=None, incval=1):\n        if incunit is not None:\n            incunit = TimeUnit(incunit)\n        self.incunit = incunit\n        self.incval = incval if incunit is not None else None\n\n        if not reqdict and incunit is None:\n            raise s_exc.BadTime(mesg='reqdict must be nonempty or incunit must be non-None')\n\n        if TimeUnit.DAY in reqdict:\n            raise s_exc.BadTime(mesg='Must not specify day as requirement')\n\n        if TimeUnit.DAYOFMONTH in reqdict and TimeUnit.DAYOFWEEK in reqdict:\n            raise s_exc.BadTime(mesg='Day of month and day of week may not both be requirements')\n\n        if TimeUnit.DAYOFWEEK in reqdict and incunit is not None:\n            raise s_exc.BadTime(mesg='Day of week requirement not supported with a recurrence')\n\n        if incunit == TimeUnit.DAYOFMONTH:\n            raise s_exc.BadTime(mesg='Day of month not a valid incunit')\n\n        if incunit is not None:\n            boundmin, boundmax = 
_UnitBounds[incunit][1]\n if not boundmin <= incval <= boundmax:\n raise s_exc.BadTime(mesg='Out of bounds incval')\n\n reqdict = {TimeUnit(k): v for k, v in reqdict.items()}\n self.reqdict = reqdict\n\n for reqkey, reqval in reqdict.items():\n boundmin, boundmax = _UnitBounds[reqkey][0]\n if not boundmin <= reqval <= boundmax:\n raise s_exc.BadTime(mesg='Out of bounds reqdict value')\n\n if incunit is not None and reqkey <= incunit:\n # We could, actually support this, but most of these combinations are nonsensical (e.g. run every 5\n # minutes in 2018 only?)\n raise s_exc.BadTime(mesg='Must not have fixed unit equal to or greater than recurrence unit')\n\n self.reqdict = {}\n # Put keys in size order, with dayof... added last, as nexttime processes in that order\n for key in _NextUnitMap:\n if key in reqdict:\n self.reqdict[key] = reqdict[key]\n\n def __repr__(self):\n return repr(self.pack())\n\n def pack(self):\n '''\n Make ApptRec json/msgpack-friendly\n '''\n reqdictf = {k.name.lower(): v for (k, v) in self.reqdict.items()}\n incunitf = None if self.incunit is None else self.incunit.name.lower()\n return (reqdictf, incunitf, self.incval)\n\n @classmethod\n def unpack(cls, val):\n '''\n Convert from json/msgpack-friendly\n '''\n reqdictf, incunitf, incval = val\n reqdict = {TimeUnit[k.upper()]: v for (k, v) in reqdictf.items()}\n incunit = None if incunitf is None else TimeUnit[incunitf.upper()]\n return cls(reqdict, incunit, incval)\n\n def nexttime(self, lastts):\n '''\n Returns next timestamp that meets requirements, incrementing by (self.incunit * incval) if not increasing, or\n 0.0 if there are no future matches\n '''\n lastdt = datetime.datetime.fromtimestamp(lastts, tz.utc)\n newvals = {} # all the new fields that will be changed in datetime of lastts\n\n # Truncate the seconds part\n newdt = lastdt.replace(second=0)\n\n # Note: self.reqdict is sorted from largest unit to smallest\n for unit, newval in self.reqdict.items():\n dtkey = _TimeunitToDatetime[unit]\n\n if unit is TimeUnit.DAYOFWEEK:\n newdt = newdt.replace(**newvals)\n newvals = {}\n newval = newdt.day + (6 + newval - newdt.weekday()) % 7 + 1\n if newval > calendar.monthrange(newdt.year, newdt.month)[1]:\n newval -= 7\n\n elif unit is TimeUnit.YEAR:\n # As we change the year, clamp the day of the month to a valid value (only matters on leap day)\n dayval = _dayofmonth(newdt.day, newdt.month, newval)\n newvals['day'] = dayval\n\n elif unit is TimeUnit.MONTH:\n # As we change the month, clamp the day of the month to a valid value\n newdt = newdt.replace(**newvals)\n newvals = {}\n dayval = _dayofmonth(newdt.day, newval, newdt.year)\n newvals['day'] = dayval\n\n elif unit is TimeUnit.DAYOFMONTH:\n newdt = newdt.replace(**newvals)\n newvals = {}\n newval = _dayofmonth(newval, newdt.month, newdt.year)\n\n newvals[dtkey] = newval\n\n newdt = newdt.replace(**newvals)\n\n # Then move forward if we have to\n if newdt <= lastdt or \\\n self.incunit == TimeUnit.DAYOFWEEK and newdt.weekday() != self.incval:\n if self.incunit is None:\n largest_req = min(self.reqdict.keys())\n tmpunit = _NextUnitMap[largest_req]\n if tmpunit is None: # required a year and we're already there\n return 0.0\n # Unless we're going to the next day of week, increment by 1 unit of the next larger unit\n tmpincval = self.reqdict.get(TimeUnit.DAYOFWEEK, 1)\n else:\n tmpunit = self.incunit\n tmpincval = self.incval\n newdt = self._inc(tmpunit, tmpincval, self.reqdict, lastdt, newdt)\n assert newdt > lastdt\n return newdt.timestamp()\n\n def _inc(self, 
incunit, incval, reqdict, origdt, dt):\n '''\n Return a datetime incremented by incunit * incval\n '''\n if incunit == TimeUnit.YEAR:\n return dt.replace(year=dt.year + incval)\n if incunit == TimeUnit.MONTH:\n newyear = dt.year\n absmonth = dt.month + incval - 1\n newmonth = absmonth % 12 + 1\n newyear += absmonth // 12\n daysinmonth = calendar.monthrange(newyear, newmonth)[1]\n dayofmonthreq = reqdict.get(TimeUnit.DAYOFMONTH)\n if dayofmonthreq is not None:\n newday = _dayofmonth(dayofmonthreq, newmonth, newyear)\n else:\n newday = min(daysinmonth, dt.day)\n return dt.replace(day=newday, month=newmonth, year=newyear)\n if incunit == TimeUnit.DAY:\n return dt + datetime.timedelta(days=incval)\n if incunit == TimeUnit.DAYOFWEEK:\n # incval in this case means next day of week whose weekday matches incval (0-6)\n days = (6 + incval - dt.weekday()) % 7 + 1\n newdt = dt + datetime.timedelta(days=days)\n assert newdt.weekday() == incval\n return newdt\n if incunit == TimeUnit.HOUR:\n return dt + datetime.timedelta(hours=incval)\n if incunit == TimeUnit.MINUTE:\n return dt + datetime.timedelta(minutes=incval)\n else:\n assert 0, 'Invalid incunit' # pragma: no cover\n\nclass _Appt:\n '''\n A single entry in the Agenda: a storm query to run in the future, potentially more than once\n\n Each such entry has a list of ApptRecs. Each time the appointment is scheduled, the nexttime of the appointment is\n the lowest nexttime of all its ApptRecs.\n '''\n def __init__(self, stor, iden, recur, indx, query, creator, recs, nexttime=None, view=None):\n self.doc = ''\n self.name = ''\n self.stor = stor\n self.iden = iden\n self.recur = recur # does this appointment repeat\n self.indx = indx # incremented for each appt added ever. Used for nexttime tiebreaking for stable ordering\n self.query = query # query to run\n self.creator = creator # user iden to run query as\n self.recs = recs # List[ApptRec] list of the individual entries to calculate next time from\n self._recidxnexttime = None # index of rec who is up next\n self.view = view\n\n if self.recur and not self.recs:\n raise s_exc.BadTime(mesg='A recurrent appointment with no records')\n\n if nexttime is None and self.recs:\n self.nexttime = self.stor._getNowTick()\n self.updateNexttime(self.nexttime)\n if self.nexttime is None:\n raise s_exc.BadTime(mesg='Appointment is in the past')\n else:\n self.nexttime = nexttime\n self.isrunning = False # whether it is currently running\n self.startcount = 0 # how many times query has started\n self.errcount = 0 # how many times this appt failed\n self.lasterrs = collections.deque((), maxlen=5)\n self.laststarttime = None\n self.lastfinishtime = None\n self.lastresult = None\n self.enabled = True\n\n def getStorNode(self, form):\n ndef = (form.name, form.type.norm(self.iden)[0])\n buid = s_common.buid(ndef)\n\n props = {\n 'doc': self.doc,\n 'name': self.name,\n 'storm': self.query,\n }\n\n pnorms = {}\n for prop, valu in props.items():\n formprop = form.props.get(prop)\n if formprop is not None and valu is not None:\n pnorms[prop] = formprop.type.norm(valu)[0]\n\n return (buid, {\n 'ndef': ndef,\n 'props': pnorms\n })\n\n def __eq__(self, other):\n ''' For heap logic to sort upcoming events lower '''\n return (self.nexttime, self.indx) == (other.nexttime, other.indx)\n\n def __lt__(self, other):\n ''' For heap logic '''\n return (self.nexttime, self.indx) < (other.nexttime, other.indx)\n\n def pack(self):\n return {\n 'ver': 1,\n 'doc': self.doc,\n 'name': self.name,\n 'enabled': self.enabled,\n 'recur': 
self.recur,\n 'iden': self.iden,\n 'view': self.view,\n 'indx': self.indx,\n 'query': self.query,\n 'creator': self.creator,\n 'recs': [d.pack() for d in self.recs],\n 'nexttime': self.nexttime,\n 'startcount': self.startcount,\n 'errcount': self.errcount,\n 'isrunning': self.isrunning,\n 'laststarttime': self.laststarttime,\n 'lastfinishtime': self.lastfinishtime,\n 'lastresult': self.lastresult,\n 'lasterrs': list(self.lasterrs)\n }\n\n @classmethod\n def unpack(cls, stor, val):\n if val['ver'] != 1:\n raise s_exc.BadStorageVersion(mesg=f\"Found version {val['ver']}\") # pragma: no cover\n recs = [ApptRec.unpack(tupl) for tupl in val['recs']]\n appt = cls(stor, val['iden'], val['recur'], val['indx'], val['query'], val['creator'], recs, nexttime=val['nexttime'], view=val.get('view'))\n appt.doc = val.get('doc', '')\n appt.name = val.get('name', '')\n appt.laststarttime = val['laststarttime']\n appt.lastfinishtime = val['lastfinishtime']\n appt.lastresult = val['lastresult']\n appt.enabled = val['enabled']\n\n return appt\n\n def updateNexttime(self, now):\n '''\n Find the next time this appointment should be scheduled.\n\n Delete any nonrecurring record that just happened.\n '''\n if self._recidxnexttime is not None and not self.recur:\n del self.recs[self._recidxnexttime]\n\n while self.recs and self.nexttime <= now:\n\n lowtime = 999999999999.9\n\n # Find the lowest next time of all of our recs (backwards, so we can delete)\n for i in range(len(self.recs) - 1, -1, -1):\n rec = self.recs[i]\n nexttime = rec.nexttime(self.nexttime)\n if nexttime == 0.0:\n # We blew by and missed a fixed-year appointment, either due to clock shenanigans, this query going\n # really long, or the initial requirement being in the past\n logger.warning(f'Missed an appointment: {rec}')\n del self.recs[i]\n continue\n if nexttime < lowtime:\n lowtime = nexttime\n lowidx = i\n\n if not self.recs:\n break\n\n self._recidxnexttime = lowidx\n self.nexttime = lowtime\n\n if not self.recs:\n self._recidxnexttime = None\n self.nexttime = None\n return\n\n async def setDoc(self, text, nexs=False):\n '''\n Set the doc field of an appointment.\n '''\n self.doc = text\n await self._save(nexs=nexs)\n\n async def setName(self, text, nexs=False):\n self.name = text\n await self._save(nexs=nexs)\n\n async def _save(self, nexs=False):\n await self.stor._storeAppt(self, nexs=nexs)\n\nclass Agenda(s_base.Base):\n '''\n Organize and execute all the scheduled storm queries in a cortex.\n '''\n\n async def __anit__(self, core):\n\n await s_base.Base.__anit__(self)\n\n self.core = core\n self.apptheap = [] # Stores the appointments in a heap such that the first element is the next appt to run\n self.appts = {} # Dict[bytes: Appt]\n self._next_indx = 0 # index a new appt gets assigned\n self.tickoff = 0 # Used for test overrides\n\n self._wake_event = s_coro.Event() # Causes the scheduler loop to wake up\n self.onfini(self._wake_event.set)\n\n self._hivenode = await self.core.hive.open(('agenda', 'appts')) # Persistent storage\n self.onfini(self.stop)\n\n self.enabled = False\n self._schedtask = None # The task of the scheduler loop. Doesn't run until we're enabled\n\n self._running_tasks = [] # The actively running cron job tasks\n await self._load_all()\n\n async def start(self):\n '''\n Enable cron jobs to start running, start the scheduler loop\n\n Go through all the appointments, making sure the query is valid, and remove the ones that aren't. 
(We can't\n        evaluate queries until enabled because not all the modules are loaded yet.)\n        '''\n        if self.enabled:\n            return\n\n        await self._load_all()\n        for iden, appt in self.appts.items():\n            try:\n                await self.core.getStormQuery(appt.query)\n            except Exception as e:\n                logger.exception(f'Invalid appointment {iden} {appt.name} found in storage. Disabling. {e}',\n                                 extra={'synapse': {'iden': iden, 'name': appt.name, 'text': appt.query}})\n                appt.enabled = False\n\n        self._schedtask = self.schedCoro(self._scheduleLoop())\n        self.enabled = True\n\n    async def stop(self):\n        \"Cancel the scheduler loop, and set self.enabled to False.\"\n        if not self.enabled:\n            return\n        self._schedtask.cancel()\n        for task in self._running_tasks:\n            await task.fini()\n\n        self.enabled = False\n\n    async def _load_all(self):\n        '''\n        Load all the appointments from persistent storage\n        '''\n        # Clear existing appointments before loading\n        self.apptheap = []\n        self.appts = {}\n\n        to_delete = []\n        for iden, node in iter(self._hivenode):\n            val = node.valu\n            try:\n                appt = _Appt.unpack(self, val)\n                if appt.iden != iden:\n                    raise s_exc.InconsistentStorage(mesg='iden inconsistency')\n                self._addappt(iden, appt)\n                self._next_indx = max(self._next_indx, appt.indx + 1)\n            except (s_exc.InconsistentStorage, s_exc.BadStorageVersion, s_exc.BadTime, TypeError, KeyError,\n                    UnicodeDecodeError) as e:\n                logger.warning('Invalid appointment %r found in storage: %r. Removing.', iden, e)\n                to_delete.append(iden)\n                continue\n\n        for iden in to_delete:\n            node = self._hivenode.get(iden)\n            if node is not None:\n                await node.hive.pop(node.full)\n\n        # Make sure we don't assign the same index to 2 appointments\n        if self.appts:\n            maxindx = max(appt.indx for appt in self.appts.values())\n            self._next_indx = maxindx + 1\n\n    def _addappt(self, iden, appt):\n        '''\n        Updates the data structures to add an appointment\n        '''\n        if appt.nexttime:\n            heapq.heappush(self.apptheap, appt)\n        self.appts[iden] = appt\n        if self.apptheap and self.apptheap[0] is appt:\n            self._wake_event.set()\n\n    async def _storeAppt(self, appt, nexs=False):\n        ''' Store a single appointment '''\n        full = self._hivenode.full + (appt.iden,)\n        stordict = appt.pack()\n\n        # Don't store ephemeral props\n        for prop in ('startcount', 'errcount', 'lasterrs'):\n            stordict.pop(prop, None)\n\n        await self.core.hive.set(full, stordict, nexs=nexs)\n\n    @staticmethod\n    def _dictproduct(rdict):\n        '''\n        Yields a series of dicts that cover the combination of all multiple-value (e.g. lists or tuples) values, with\n        non-multiple-value values remaining the same.\n        '''\n        multkeys = [k for k, v in rdict.items() if isinstance(v, Iterable)]\n        if not multkeys:\n            yield rdict\n            return\n\n        multvals = [rdict[k] for k in multkeys]\n\n        for combo in itertools.product(*multvals):\n            newdict = rdict.copy()\n            for i, k in enumerate(multkeys):\n                newdict[k] = combo[i]\n            yield newdict\n\n    def list(self):\n        return list(self.appts.items())\n\n    async def add(self, cdef):\n        '''\n        Persistently adds an appointment\n\n        Args:\n            cdef (dict): Dictionary containing the Cron definition.\n\n        Notes:\n            The cron definition may contain the following keys:\n\n                creator (str)\n                    Iden of the creating user.\n\n                iden (str)\n                    Iden of the appointment.\n\n                storm (str)\n                    The Storm query to run.\n\n                reqs (Union[None, Dict[TimeUnit, Union[int, Tuple[int]]], List[...]])\n                    One or more dicts of the fixed aspects of the appointment. 
Each dict value may be a single value or multiple values.\n                    May be an empty dict or None.\n\n                incunit (Union[None, TimeUnit])\n                    The unit that changes for recurring, or None for non-recurring. It is an error for this value to\n                    match a key in reqdict.\n\n                incvals (Union[None, int, Iterable[int]])\n                    Count of units of incunit or explicit day of week or day of month.\n                    Not allowed for incunit == None, required for others (1 would be a typical value)\n\n            If the values for req and incvals are both lists, all combinations of all values (the product) are used.\n\n        Returns:\n            Packed appointment definition\n        '''\n        iden = cdef['iden']\n        incunit = cdef.get('incunit')\n        incvals = cdef.get('incvals')\n        reqs = cdef.get('reqs', {})\n        query = cdef.get('storm')\n        creator = cdef.get('creator')\n        view = cdef.get('view')\n\n        recur = incunit is not None\n        indx = self._next_indx\n        self._next_indx += 1\n\n        if iden in self.appts:\n            raise s_exc.DupIden()\n\n        if not query:\n            raise ValueError('\"query\" key of cdef parameter is not present or empty')\n\n        await self.core.getStormQuery(query)\n\n        if not creator:\n            raise ValueError('\"creator\" key of cdef parameter is not present or empty')\n\n        if not reqs and incunit is None:\n            raise ValueError('at least one of reqs and incunit must be non-empty')\n\n        if incunit is not None and incvals is None:\n            raise ValueError('incvals must be non-None if incunit is non-None')\n\n        if isinstance(reqs, Mapping):\n            reqs = [reqs]\n\n        # Find all combinations of values in reqdict values and incvals values\n        nexttime = None\n        recs = []  # type: ignore\n        for req in reqs:\n            if TimeUnit.NOW in req:\n                if incunit is not None:\n                    mesg = \"Recurring jobs may not be scheduled to run 'now'\"\n                    raise ValueError(mesg)\n                nexttime = self._getNowTick()\n                continue\n\n            reqdicts = self._dictproduct(req)\n            if not isinstance(incvals, Iterable):\n                incvals = (incvals, )\n            recs.extend(ApptRec(rd, incunit, v) for (rd, v) in itertools.product(reqdicts, incvals))\n\n        appt = _Appt(self, iden, recur, indx, query, creator, recs, nexttime=nexttime, view=view)\n        self._addappt(iden, appt)\n\n        appt.doc = cdef.get('doc', '')\n\n        await self._storeAppt(appt)\n\n        return appt.pack()\n\n    async def get(self, iden):\n\n        appt = self.appts.get(iden)\n        if appt is not None:\n            return appt\n\n        mesg = f'No cron job with id: {iden}'\n        raise s_exc.NoSuchIden(iden=iden, mesg=mesg)\n\n    async def enable(self, iden):\n        appt = self.appts.get(iden)\n        if appt is None:\n            raise s_exc.NoSuchIden()\n\n        await self.mod(iden, appt.query)\n\n    async def disable(self, iden):\n        appt = self.appts.get(iden)\n        if appt is None:\n            raise s_exc.NoSuchIden()\n\n        appt.enabled = False\n        await self._storeAppt(appt)\n\n    async def mod(self, iden, query):\n        '''\n        Change the query of an appointment\n        '''\n        appt = self.appts.get(iden)\n        if appt is None:\n            raise s_exc.NoSuchIden()\n\n        if not query:\n            raise ValueError('empty query')\n\n        if self.enabled:\n            await self.core.getStormQuery(query)\n\n        appt.query = query\n        appt.enabled = True  # in case it was disabled for a bad query\n\n        await self._storeAppt(appt)\n\n    async def move(self, croniden, viewiden):\n        '''\n        Move a cronjob from one view to another\n        '''\n        appt = self.appts.get(croniden)\n        if appt is None:\n            raise s_exc.NoSuchIden()\n\n        appt.view = viewiden\n\n        await self._storeAppt(appt)\n\n    async def delete(self, iden):\n        '''\n        Delete an appointment\n        '''\n        appt = self.appts.get(iden)\n        if appt is None:\n            raise s_exc.NoSuchIden()\n\n        try:\n            heappos = self.apptheap.index(appt)\n        except ValueError:\n            pass # this is OK, just a non-recurring 
appt that has no more records\n        else:\n            # If we're already the last item, just delete it\n            if heappos == len(self.apptheap) - 1:\n                del self.apptheap[heappos]\n            else:\n                # put the last item at the current position and reheap\n                self.apptheap[heappos] = self.apptheap.pop()\n                heapq.heapify(self.apptheap)\n\n        del self.appts[iden]\n        node = self._hivenode.get(iden)\n        if node is not None:\n            await node.hive.pop(node.full)\n\n    def _getNowTick(self):\n        return time.time() + self.tickoff\n\n    def _addTickOff(self, offs):\n        self.tickoff += offs\n        self._wake_event.set()\n\n    async def _scheduleLoop(self):\n        '''\n        Task loop to issue query tasks at the right times.\n        '''\n        while True:\n\n            timeout = None\n            if self.apptheap:\n                timeout = self.apptheap[0].nexttime - self._getNowTick()\n\n            if timeout is None or timeout > 0:\n                self._wake_event.clear()\n                await self._wake_event.timewait(timeout=timeout)\n\n            if self.isfini:\n                return\n\n            now = self._getNowTick()\n            while self.apptheap and self.apptheap[0].nexttime <= now:\n\n                appt = heapq.heappop(self.apptheap)\n                appt.updateNexttime(now)\n\n                if appt.nexttime:\n                    heapq.heappush(self.apptheap, appt)\n\n                if not appt.enabled or not self.enabled:\n                    continue\n\n                if appt.isrunning:  # pragma: no cover\n                    mesg = f'Appointment {appt.iden} {appt.name} is still running from previous time when scheduled' \\\n                           f' to run. Skipping.'\n                    logger.warning(mesg,\n                                   extra={'synapse': {'iden': appt.iden, 'name': appt.name}})\n                else:\n                    await self._execute(appt)\n\n    async def _execute(self, appt):\n        '''\n        Fire off the task to make the storm query\n        '''\n        user = self.core.auth.user(appt.creator)\n        if user is None:\n            logger.warning(f'Unknown user {appt.creator} in stored appointment {appt.iden} {appt.name}',\n                           extra={'synapse': {'iden': appt.iden, 'name': appt.name, 'user': appt.creator}})\n            await self._markfailed(appt, 'unknown user')\n            return\n\n        locked = user.info.get('locked')\n        if locked:\n            logger.warning(f'Cron {appt.iden} {appt.name} failed because creator {user.name} is locked',\n                           extra={'synapse': {'iden': appt.iden, 'name': appt.name, 'user': appt.creator,\n                                              'username': user.name}})\n            await self._markfailed(appt, 'locked user')\n            return\n\n        view = self.core.getView(iden=appt.view, user=user)\n        if view is None:\n            logger.warning(f'Unknown view {appt.view} in stored appointment {appt.iden} {appt.name}',\n                           extra={'synapse': {'iden': appt.iden, 'name': appt.name, 'user': appt.creator,\n                                              'username': user.name, 'view': appt.view}})\n            await self._markfailed(appt, 'unknown view')\n            return\n\n        info = {'iden': appt.iden, 'query': appt.query, 'view': view.iden}\n        task = await self.core.boss.execute(self._runJob(user, appt), f'Cron {appt.iden}', user, info=info)\n\n        appt.task = task\n        self._running_tasks.append(task)\n\n        task.onfini(functools.partial(self._running_tasks.remove, task))\n\n    async def _markfailed(self, appt, reason):\n        appt.lastfinishtime = appt.laststarttime = self._getNowTick()\n        appt.startcount += 1\n        appt.isrunning = False\n        appt.lastresult = f'Failed due to {reason}'\n        if not self.isfini:\n            await self._storeAppt(appt, nexs=True)\n\n    async def _runJob(self, user, appt):\n        '''\n        Actually run the storm query, updating the appropriate statistics and results\n        '''\n        count = 0\n        appt.isrunning = True\n        appt.laststarttime = self._getNowTick()\n        appt.startcount += 1\n        await self._storeAppt(appt, nexs=True)\n\n        with s_provenance.claim('cron', iden=appt.iden):\n            logger.info(f'Agenda executing for iden={appt.iden}, name={appt.name} user={user.name}, view={appt.view}, query={appt.query}',\n                        
extra={'synapse': {'iden': appt.iden, 'name': appt.name, 'user': user.iden, 'text': appt.query,\n 'username': user.name, 'view': appt.view}})\n starttime = self._getNowTick()\n success = False\n try:\n opts = {'user': user.iden, 'view': appt.view, 'vars': {'auto': {'iden': appt.iden, 'type': 'cron'}}}\n opts = self.core._initStormOpts(opts)\n view = self.core._viewFromOpts(opts)\n # Yes, this isn't technically on the bottom half of a nexus transaction\n # But because the scheduling loop only runs on non-mirrors, we can kinda skirt by all that\n # and be relatively okay. The only catch is that the nexus offset will correspond to the\n # last nexus transaction, and not the start/stop\n await self.core.feedBeholder('cron:start', {'iden': appt.iden})\n async for node in view.eval(appt.query, opts=opts, log_info={'cron': appt.iden}):\n count += 1\n except asyncio.CancelledError:\n result = 'cancelled'\n raise\n except Exception as e:\n result = f'raised exception {e}'\n logger.exception(f'Agenda job {appt.iden} {appt.name} raised exception',\n extra={'synapse': {'iden': appt.iden, 'name': appt.name}}\n )\n else:\n success = True\n result = f'finished successfully with {count} nodes'\n finally:\n finishtime = self._getNowTick()\n if not success:\n appt.errcount += 1\n appt.lasterrs.append(result)\n took = finishtime - starttime\n mesg = f'Agenda completed query for iden={appt.iden} name={appt.name} with result \"{result}\" ' \\\n f'took {took:.3f}s'\n logger.info(mesg, extra={'synapse': {'iden': appt.iden, 'name': appt.name, 'user': user.iden,\n 'result': result, 'username': user.name, 'took': took}})\n appt.lastfinishtime = finishtime\n appt.isrunning = False\n appt.lastresult = result\n if not self.isfini:\n # fire beholder event before invoking nexus change (in case readonly)\n await self.core.feedBeholder('cron:stop', {'iden': appt.iden})\n await self._storeAppt(appt, nexs=True)\n", "sub_path": "synapse/lib/agenda.py", "file_name": "agenda.py", "file_ext": "py", "file_size_in_byte": 33949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "synapse.lib.config.getJsValidator", "line_number": 26, "usage_type": "call"}, {"api_name": "synapse.lib.config", "line_number": 26, "usage_type": "name"}, {"api_name": "synapse.lib.config.re_iden", "line_number": 30, "usage_type": "attribute"}, {"api_name": "synapse.lib.config", "line_number": 30, "usage_type": "name"}, {"api_name": "synapse.lib.config.re_iden", "line_number": 31, "usage_type": "attribute"}, {"api_name": "synapse.lib.config", "line_number": 31, "usage_type": "name"}, {"api_name": "synapse.lib.config.re_iden", "line_number": 32, "usage_type": "attribute"}, {"api_name": "synapse.lib.config", "line_number": 32, "usage_type": "name"}, {"api_name": "calendar.monthrange", "line_number": 85, "usage_type": "call"}, {"api_name": "enum.IntEnum", "line_number": 91, "usage_type": "attribute"}, {"api_name": "enum.auto", "line_number": 95, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 96, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 97, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 98, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 99, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 100, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 101, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 
102, "usage_type": "call"}, {"api_name": "synapse.exc.BadTime", "line_number": 151, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 151, "usage_type": "name"}, {"api_name": "synapse.exc.BadTime", "line_number": 154, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 154, "usage_type": "name"}, {"api_name": "synapse.exc.BadTime", "line_number": 157, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 157, "usage_type": "name"}, {"api_name": "synapse.exc.BadTime", "line_number": 160, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 160, "usage_type": "name"}, {"api_name": "synapse.exc.BadTime", "line_number": 163, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 163, "usage_type": "name"}, {"api_name": "synapse.exc.BadTime", "line_number": 168, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 168, "usage_type": "name"}, {"api_name": "synapse.exc.BadTime", "line_number": 176, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 176, "usage_type": "name"}, {"api_name": "synapse.exc.BadTime", "line_number": 181, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 181, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 215, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 215, "usage_type": "attribute"}, {"api_name": "datetime.timezone.utc", "line_number": 215, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 215, "usage_type": "name"}, {"api_name": "calendar.monthrange", "line_number": 229, "usage_type": "call"}, {"api_name": "calendar.monthrange", "line_number": 281, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 289, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 293, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 297, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 299, "usage_type": "call"}, {"api_name": "synapse.exc.BadTime", "line_number": 324, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 324, "usage_type": "name"}, {"api_name": "synapse.exc.BadTime", "line_number": 330, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 330, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 336, "usage_type": "call"}, {"api_name": "synapse.common.buid", "line_number": 344, "usage_type": "call"}, {"api_name": "synapse.common", "line_number": 344, "usage_type": "name"}, {"api_name": "synapse.exc.BadStorageVersion", "line_number": 397, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 397, "usage_type": "name"}, {"api_name": "synapse.lib.base.Base", "line_number": 461, "usage_type": "attribute"}, {"api_name": "synapse.lib.base", "line_number": 461, "usage_type": "name"}, {"api_name": "synapse.lib.base.Base.__anit__", "line_number": 468, "usage_type": "call"}, {"api_name": "synapse.lib.base.Base", "line_number": 468, "usage_type": "attribute"}, {"api_name": "synapse.lib.base", "line_number": 468, "usage_type": "name"}, {"api_name": "synapse.lib.coro.Event", "line_number": 476, "usage_type": "call"}, {"api_name": "synapse.lib.coro", "line_number": 476, "usage_type": "name"}, {"api_name": "synapse.exc.InconsistentStorage", "line_number": 534, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 534, "usage_type": "name"}, {"api_name": "synapse.exc.InconsistentStorage", "line_number": 537, "usage_type": "attribute"}, 
{"api_name": "synapse.exc", "line_number": 537, "usage_type": "name"}, {"api_name": "synapse.exc.BadStorageVersion", "line_number": 537, "usage_type": "attribute"}, {"api_name": "synapse.exc.BadTime", "line_number": 537, "usage_type": "attribute"}, {"api_name": "heapq.heappush", "line_number": 558, "usage_type": "call"}, {"api_name": "collections.abc.Iterable", "line_number": 580, "usage_type": "argument"}, {"api_name": "itertools.product", "line_number": 587, "usage_type": "call"}, {"api_name": "synapse.exc.DupIden", "line_number": 645, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 645, "usage_type": "name"}, {"api_name": "collections.abc.Mapping", "line_number": 661, "usage_type": "argument"}, {"api_name": "collections.abc.Iterable", "line_number": 676, "usage_type": "argument"}, {"api_name": "itertools.product", "line_number": 678, "usage_type": "call"}, {"api_name": "synapse.exc.NoSuchIden", "line_number": 696, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 696, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchIden", "line_number": 701, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 701, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchIden", "line_number": 708, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 708, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchIden", "line_number": 719, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 719, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchIden", "line_number": 738, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 738, "usage_type": "name"}, {"api_name": "synapse.exc.NoSuchIden", "line_number": 750, "usage_type": "call"}, {"api_name": "synapse.exc", "line_number": 750, "usage_type": "name"}, {"api_name": "heapq.heapify", "line_number": 763, "usage_type": "call"}, {"api_name": "time.time", "line_number": 771, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 797, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 801, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 847, "usage_type": "call"}, {"api_name": "synapse.lib.provenance.claim", "line_number": 867, "usage_type": "call"}, {"api_name": "synapse.lib.provenance", "line_number": 867, "usage_type": "name"}, {"api_name": "asyncio.CancelledError", "line_number": 884, "usage_type": "attribute"}]} +{"seq_id": "424391222", "text": "import csv\nimport datetime\nfrom django.core.management.base import BaseCommand\nfrom ...models import Account, Entry\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('file')\n\n def handle(self, *args, **options):\n with open(options['file'], 'r') as f:\n reader = csv.reader(f, delimiter=\";\")\n for row in reader:\n if row[8] != 'G':\n continue\n if row[2][0] not in ('6', '7'):\n continue\n date = datetime.date(int(row[1][4:6]), int(row[1][2:4]), int(row[1][0:2]))\n account, created = Account.objects.get_or_create(number=row[2], defaults={'title': row[3]})\n Entry.objects.create(date=date, account=account, title=row[6], amount=row[7])\n", "sub_path": "accounting/management/commands/import_accounting_entries.py", "file_name": "import_accounting_entries.py", "file_ext": "py", "file_size_in_byte": 820, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 7, "usage_type": "name"}, {"api_name": "csv.reader", 
"line_number": 13, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Account.objects.get_or_create", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Account.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Account", "line_number": 20, "usage_type": "name"}, {"api_name": "models.Entry.objects.create", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Entry.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.Entry", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "348073719", "text": "import requests\r\nimport pandas as pd\r\nimport numpy as np\r\nimport data_collection.util as util\r\nfrom data_collection.bootstrap import Bootstrap\r\n\r\n\r\nclass Player:\r\n\r\n def __init__(self, player_id, bootstrap=True):\r\n self.player_id = player_id\r\n self.player_data = requests.get(\r\n \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(self.player_id)).json()\r\n self.player_positions = {\r\n 1: 'GKP',\r\n 2: 'DEF',\r\n 3: 'MID',\r\n 4: 'FWD'\r\n }\r\n if bootstrap:\r\n self.bootstrap = Bootstrap()\r\n\r\n def get_player_history(self):\r\n try:\r\n player_history = self.player_data['history']\r\n try:\r\n columnlist = list(player_history[0].keys())\r\n data = np.array([[d[column] for column in columnlist] for d in player_history])\r\n return util.dfcolumnstonumeric(pd.DataFrame(data, columns=columnlist))\r\n except IndexError:\r\n return pd.DataFrame(np.zeros([1, 20]), columns=['total_points', 'opponent_team', 'was_home', 'minutes',\r\n 'goals_scored', 'assists', 'clean_sheets',\r\n 'goals_conceded',\r\n 'own_goals', 'penalties_saved', 'penalties_missed',\r\n 'yellow_cards',\r\n 'red_cards', 'saves', 'bonus', 'bps', 'influence',\r\n 'creativity',\r\n 'threat', 'ict_index'])\r\n except KeyError:\r\n return pd.DataFrame(np.zeros([1, 20]), columns=['total_points', 'opponent_team', 'was_home', 'minutes',\r\n 'goals_scored', 'assists', 'clean_sheets', 'goals_conceded',\r\n 'own_goals', 'penalties_saved', 'penalties_missed',\r\n 'yellow_cards',\r\n 'red_cards', 'saves', 'bonus', 'bps', 'influence', 'creativity',\r\n 'threat', 'ict_index'])\r\n\r\n def get_player_last_game(self, n_games):\r\n try:\r\n player_history = self.player_data['history']\r\n try:\r\n if n_games > 0:\r\n return sum(pd.DataFrame(player_history[-n_games:])['total_points'])\r\n return 0\r\n except IndexError:\r\n return 0\r\n except KeyError:\r\n return 0\r\n\r\n def get_player_fixtures(self):\r\n player_fixtures = self.player_data['fixtures']\r\n columnlist = list(player_fixtures[0].keys())\r\n n_columns = len(columnlist)\r\n data = np.array([[d[column] for column in columnlist] if len(d) == n_columns else [0 for i in range(n_columns)]\r\n for d in player_fixtures])\r\n return util.dfcolumnstonumeric(pd.DataFrame(data, columns=columnlist))\r\n\r\n def get_player_last_three(self):\r\n player_history = self.get_player_history()\r\n columnlist = ['total_points', 'opponent_team', 'was_home', 'minutes',\r\n 'goals_scored', 'assists', 'clean_sheets', 'goals_conceded',\r\n 'own_goals', 'penalties_saved', 'penalties_missed', 'yellow_cards',\r\n 'red_cards', 'saves', 'bonus', 'bps', 'influence', 'creativity',\r\n 'threat', 'ict_index']\r\n player_history = player_history.reindex(columns=columnlist)\r\n flattened_last_three = np.array(player_history.iloc[-3:, :]).flatten()\r\n missing_vals = 60 - len(flattened_last_three)\r\n filler = np.full((1, missing_vals), 
np.nan)\r\n flattened_last_three = np.append(filler, flattened_last_three)\r\n new_column_list = ['total_points_3', 'opponent_team_3', 'was_home_3', 'minutes_3',\r\n 'goals_scored_3', 'assists_3', 'clean_sheets_3', 'goals_conceded_3',\r\n 'own_goals_3', 'penalties_saved_3', 'penalties_missed_3', 'yellow_cards_3',\r\n 'red_cards_3', 'saves_3', 'bonus_3', 'bps_3', 'influence_3', 'creativity_3',\r\n 'threat_3', 'ict_index_3', 'total_points_2', 'opponent_team_2', 'was_home_2', 'minutes_2',\r\n 'goals_scored_2', 'assists_2', 'clean_sheets_2', 'goals_conceded_2',\r\n 'own_goals_2', 'penalties_saved_2', 'penalties_missed_2', 'yellow_cards_2',\r\n 'red_cards_2', 'saves_2', 'bonus_2', 'bps_2', 'influence_2', 'creativity_2',\r\n 'threat_2', 'ict_index_2', 'total_points_1', 'opponent_team_1', 'was_home_1', 'minutes_1',\r\n 'goals_scored_1', 'assists_1', 'clean_sheets_1', 'goals_conceded_1',\r\n 'own_goals_1', 'penalties_saved_1', 'penalties_missed_1', 'yellow_cards_1',\r\n 'red_cards_1', 'saves_1', 'bonus_1', 'bps_1', 'influence_1', 'creativity_1',\r\n 'threat_1', 'ict_index_1']\r\n return pd.DataFrame([flattened_last_three], columns=new_column_list).reset_index(drop=True)\r\n\r\n def get_next_fixture_data(self):\r\n next_fixture_cols = ['team_h', 'team_a', 'is_home', 'difficulty']\r\n next_fixtures = self.get_player_fixtures()\r\n next_gameweek_fixtures = next_fixtures[next_fixtures.event == self.bootstrap.next_event]\r\n if len(next_gameweek_fixtures.index) == 0:\r\n return pd.DataFrame(np.zeros(6), index=['opposition', 'is_home', 'opposition_strength', 'opposition_overall',\r\n 'opposition_attack', 'opposition_defence']).transpose()\r\n next_fixtures = next_gameweek_fixtures.reindex(columns=next_fixture_cols)\r\n next_fixtures['id'] = [next_fixtures['team_a'][i] if next_fixtures['is_home'][i] else next_fixtures['team_h'][i]\r\n for i in range(len(next_fixtures.index))]\r\n next_fixtures = next_fixtures.reindex(columns=['id', 'is_home', 'difficulty'])\r\n team_data = self.bootstrap.team_data\r\n team_data = team_data.reindex(\r\n columns=['id', 'strength', 'strength_overall_home', 'strength_overall_away',\r\n 'strength_attack_home', 'strength_attack_away', 'strength_defence_home',\r\n 'strength_defence_away'])\r\n next_fixtures_data = next_fixtures.join(team_data.set_index('id'), on='id')\r\n next_fixtures_data['opposition_overall'] = [next_fixtures_data['strength_overall_away'][i] if\r\n next_fixtures['is_home'][i] else\r\n next_fixtures_data['strength_overall_home'][i] for i in\r\n range(len(next_fixtures_data.index))]\r\n next_fixtures_data['opposition_attack'] = [next_fixtures_data['strength_attack_away'][i] if\r\n next_fixtures['is_home'][i] else\r\n next_fixtures_data['strength_attack_home'][i] for i in\r\n range(len(next_fixtures_data.index))]\r\n next_fixtures_data['opposition_defence'] = [next_fixtures_data['strength_defence_away'][i] if\r\n next_fixtures['is_home'][i] else\r\n next_fixtures_data['strength_defence_home'][i] for i in\r\n range(len(next_fixtures_data.index))]\r\n next_fixtures_data = next_fixtures_data.reindex(columns=['id', 'is_home', 'strength',\r\n 'opposition_overall', 'opposition_attack',\r\n 'opposition_defence'])\r\n next_fixtures_data = next_fixtures_data.rename(columns={'id': 'opposition', 'strength': 'opposition_strength'})\r\n return next_fixtures_data.reset_index(drop=True)\r\n\r\n def get_player_overall(self):\r\n return self.bootstrap.get_player_info(self.player_id, ['id', 'element_type', 'chance_of_playing_next_round',\r\n 'chance_of_playing_this_round',\r\n 
'form', 'points_per_game', 'total_points', 'minutes',\r\n 'goals_scored',\r\n 'assists', 'clean_sheets', 'goals_conceded', 'own_goals',\r\n 'penalties_saved', 'penalties_missed', 'yellow_cards',\r\n 'red_cards',\r\n 'saves', 'bonus', 'bps', 'influence', 'creativity',\r\n 'threat',\r\n 'ict_index']).reset_index(drop=True)\r\n\r\n def get_player_team_stats(self):\r\n player_team_id = self.bootstrap.get_player_info(self.player_id, columns=['team']).values[0][0]\r\n teamdata = self.bootstrap.team_data\r\n return teamdata[teamdata.id == player_team_id].reindex(\r\n columns=['strength', 'strength_overall_home', 'strength_overall_away',\r\n 'strength_attack_home', 'strength_attack_away', 'strength_defence_home',\r\n 'strength_defence_away']).reset_index(drop=True)\r\n", "sub_path": "data_collection/player.py", "file_name": "player.py", "file_ext": "py", "file_size_in_byte": 9964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "data_collection.bootstrap.Bootstrap", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "data_collection.util.dfcolumnstonumeric", "line_number": 29, "usage_type": "call"}, {"api_name": "data_collection.util", "line_number": 29, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "data_collection.util.dfcolumnstonumeric", "line_number": 65, "usage_type": "call"}, {"api_name": "data_collection.util", "line_number": 65, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "183351987", "text": "# -*- encoding: utf-8 -*-\n\nimport requests\nfrom bs4 import BeautifulSoup\nlist1=[]\nlist2=[]\ndef get_page(url):\n r=requests.get(url)\n soup=BeautifulSoup(r.text)\n a=soup.select(\"td.c2 a\")\n for i in a:\n list1.append(i.get_text()) \nre=requests.get(\"http://www.itjobswatch.co.uk/default.aspx?page=1&sortby=0&orderby=0&q=&id=300&lid=2618\")\nsoup2=BeautifulSoup(re.text)\nb=soup2.select(\"div.resultsPager table tr td\")\nfor i in b:\n list2.append(i.get_text())\nlist2.pop()\nfor numbers in list2:\n url=(\"http://www.itjobswatch.co.uk/default.aspx?page=%s&sortby=0&orderby=0&q=&id=300&lid=2618\") % numbers\n get_page(url) \nimport csv\ncsvfile = \"output.txt\"\n\nwith open(csvfile, \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n for val in list1:\n writer.writerow([val]) \nlen(list1) \n", "sub_path": 
"tech-terms.py", "file_name": "tech-terms.py", "file_ext": "py", "file_size_in_byte": 855, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "100787727", "text": "import operator as op\n\nimport numpy as np\n\nfrom jax.config import config\n\nimport pytest\n\n# enable 64-bit mode for output dtype checks\nconfig.update(\"jax_enable_x64\", True)\nfrom typing import Optional\n\nimport jax\n\nimport scico.numpy as snp\nfrom scico import linop\nfrom scico.random import randn\nfrom scico.typing import PRNGKey\n\n\ndef adjoint_AtA_test(A: linop.LinearOperator, key: Optional[PRNGKey] = None, rtol: float = 1e-4):\n \"\"\"Check the validity of A.conj().T as the adjoint for a LinearOperator A\n\n Compares the quantity sum(x.conj() * A.conj().T @ A @ x) against\n norm(A @ x)**2. If the adjoint is correct, these quantities should be equal.\n\n Args:\n A : LinearOperator to test\n key: PRNGKey for generating `x`.\n rtol: Relative tolerance\n \"\"\"\n\n # Generate a signal in the domain of A\n x, key = randn(A.input_shape, dtype=A.input_dtype, key=key)\n\n Ax = A @ x\n\n AtAx = A.conj().T @ Ax\n num = snp.sum(x.conj() * AtAx)\n den = snp.linalg.norm(Ax) ** 2\n np.testing.assert_allclose(num / den, 1, rtol=rtol)\n\n AtAx = A.H @ Ax\n num = snp.sum(x.conj() * AtAx)\n den = snp.linalg.norm(Ax) ** 2\n np.testing.assert_allclose(num / den, 1, rtol=rtol)\n\n AtAx = A.adj(Ax)\n num = snp.sum(x.conj() * AtAx)\n den = snp.linalg.norm(Ax) ** 2\n np.testing.assert_allclose(num / den, 1, rtol=rtol)\n\n\ndef adjoint_AAt_test(A: linop.LinearOperator, key: Optional[PRNGKey] = None, rtol: float = 1e-4):\n \"\"\"Check the validity of A as the adjoint for a LinearOperator A.conj().T\n\n Compares the quantity sum(y.conj() * A @ A.conj().T @ y) against\n norm(A.conj().T @ y)**2. 
If the adjoint is correct, these quantities should be equal.\n\n Args:\n A : LinearOperator to test\n key: PRNGKey for generating `x`.\n rtol: Relative tolerance\n \"\"\"\n # Generate a signal in the domain of A^T\n y, key = randn(A.output_shape, dtype=A.output_dtype, key=key)\n\n Aty = A.conj().T @ y\n AAty = A @ Aty\n num = snp.sum(y.conj() * AAty)\n den = snp.linalg.norm(Aty) ** 2\n np.testing.assert_allclose(num / den, 1, rtol=rtol)\n\n Aty = A.H @ y\n AAty = A @ Aty\n num = snp.sum(y.conj() * AAty)\n den = snp.linalg.norm(Aty) ** 2\n np.testing.assert_allclose(num / den, 1, rtol=rtol)\n\n Aty = A.adj(y)\n AAty = A @ Aty\n num = snp.sum(y.conj() * AAty)\n den = snp.linalg.norm(Aty) ** 2\n np.testing.assert_allclose(num / den, 1, rtol=rtol)\n\n\nclass AbsMatOp(linop.LinearOperator):\n \"\"\"Simple LinearOperator subclass for testing purposes.\n\n Similar to linop.MatrixOperator, but does not use the specialized MatrixOperator methods (.T, adj, etc).\n Used to verify the LinearOperator interface.\n \"\"\"\n\n def __init__(self, A, adj_fn=None):\n self.A = A\n super().__init__(\n input_shape=A.shape[1], output_shape=A.shape[0], input_dtype=A.dtype, adj_fn=adj_fn\n )\n\n def _eval(self, x):\n return self.A @ x\n\n\nclass LinearOperatorTestObj:\n def __init__(self, dtype):\n M, N = (32, 64)\n key = jax.random.PRNGKey(12345)\n self.dtype = dtype\n\n self.A, key = randn((M, N), dtype=dtype, key=key)\n self.B, key = randn((M, N), dtype=dtype, key=key)\n self.C, key = randn((N, M), dtype=dtype, key=key)\n self.D, key = randn((M, N - 1), dtype=dtype, key=key)\n\n self.x, key = randn((N,), dtype=dtype, key=key)\n self.y, key = randn((M,), dtype=dtype, key=key)\n scalar, key = randn((1,), dtype=dtype, key=key)\n self.scalar = scalar.copy().ravel()[0]\n self.Ao = AbsMatOp(self.A)\n self.Bo = AbsMatOp(self.B)\n self.Co = AbsMatOp(self.C)\n self.Do = AbsMatOp(self.D)\n\n\n@pytest.fixture(scope=\"module\", params=[np.float32, np.float64, np.complex64, np.complex128])\ndef testobj(request):\n yield LinearOperatorTestObj(request.param)\n\n\n@pytest.mark.parametrize(\"operator\", [op.add, op.sub])\ndef test_binary_op(testobj, operator):\n # Our AbsMatOp class does not override the __add__, etc\n # so AbsMatOp + AbsMatOp -> LinearOperator\n # So to verify results, we evaluate the new LinearOperator on a random input\n\n comp_mat = operator(testobj.A, testobj.B) # composite matrix\n comp_op = operator(testobj.Ao, testobj.Bo) # composite linop\n\n assert isinstance(comp_op, linop.LinearOperator) # Ensure we don't get a Map\n assert comp_op.input_dtype == testobj.A.dtype\n np.testing.assert_allclose(comp_mat @ testobj.x, comp_op @ testobj.x, rtol=5e-5)\n\n # linops of different sizes\n with pytest.raises(ValueError):\n operator(testobj.Ao, testobj.Co)\n with pytest.raises(ValueError):\n operator(testobj.Ao, testobj.Do)\n\n\n@pytest.mark.parametrize(\"operator\", [op.mul, op.truediv])\ndef test_scalar_left(testobj, operator):\n comp_mat = operator(testobj.A, testobj.scalar)\n comp_op = operator(testobj.Ao, testobj.scalar)\n assert isinstance(comp_op, linop.LinearOperator) # Ensure we don't get a Map\n assert comp_op.input_dtype == testobj.A.dtype\n np.testing.assert_allclose(comp_mat @ testobj.x, comp_op @ testobj.x, rtol=5e-5)\n\n np.testing.assert_allclose(comp_mat.conj().T @ testobj.y, comp_op.adj(testobj.y), rtol=5e-5)\n\n\n@pytest.mark.parametrize(\"operator\", [op.mul, op.truediv])\ndef test_scalar_right(testobj, operator):\n if operator == op.truediv:\n pytest.xfail(\"scalar / LinearOperator is not 
supported\")\n comp_mat = operator(testobj.scalar, testobj.A)\n comp_op = operator(testobj.scalar, testobj.Ao)\n assert comp_op.input_dtype == testobj.A.dtype\n np.testing.assert_allclose(comp_mat @ testobj.x, comp_op @ testobj.x, rtol=5e-5)\n\n\ndef test_negation(testobj):\n comp_mat = -testobj.A\n comp_op = -testobj.Ao\n assert comp_op.input_dtype == testobj.A.dtype\n np.testing.assert_allclose(comp_mat @ testobj.x, comp_op @ testobj.x, rtol=5e-5)\n\n\n@pytest.mark.parametrize(\"operator\", [op.add, op.sub])\ndef test_invalid_add_sub_array(testobj, operator):\n # Try to add or subtract an ndarray with AbsMatOp\n with pytest.raises(TypeError):\n operator(testobj.A, testobj.Ao)\n\n\n@pytest.mark.parametrize(\"operator\", [op.add, op.sub])\ndef test_invalid_add_sub_scalar(testobj, operator):\n # Try to add or subtract a scalar with AbsMatOp\n with pytest.raises(TypeError):\n operator(1.0, testobj.Ao)\n\n\ndef test_matmul_left(testobj):\n comp_mat = testobj.A @ testobj.C\n comp_op = testobj.Ao @ testobj.Co\n assert comp_op.input_dtype == testobj.A.dtype\n np.testing.assert_allclose(comp_mat @ testobj.y, comp_op @ testobj.y, rtol=5e-5)\n\n\ndef test_matmul_right(testobj):\n comp_mat = testobj.C @ testobj.A\n comp_op = testobj.Co @ testobj.Ao\n assert comp_op.input_dtype == testobj.A.dtype\n np.testing.assert_allclose(comp_mat @ testobj.x, comp_op @ testobj.x, rtol=5e-5)\n\n\ndef test_matvec_left(testobj):\n comp_mat = testobj.A @ testobj.x\n comp_op = testobj.Ao @ testobj.x\n assert comp_op.dtype == testobj.A.dtype\n np.testing.assert_allclose(comp_mat, comp_op, rtol=5e-5)\n\n\ndef test_matvec_right(testobj):\n comp_mat = testobj.C @ testobj.y\n comp_op = testobj.Co @ testobj.y\n assert comp_op.dtype == testobj.A.dtype\n np.testing.assert_allclose(comp_mat, comp_op, rtol=5e-5)\n\n\ndef test_gram(testobj):\n Ao = testobj.Ao\n a = Ao.gram(testobj.x)\n b = Ao.conj().T @ Ao @ testobj.x\n c = Ao.gram_op @ testobj.x\n\n comp_mat = testobj.A.conj().T @ testobj.A @ testobj.x\n\n np.testing.assert_allclose(a, comp_mat, rtol=5e-5)\n np.testing.assert_allclose(b, comp_mat, rtol=5e-5)\n np.testing.assert_allclose(c, comp_mat, rtol=5e-5)\n\n\ndef test_matvec_call(testobj):\n # A @ x and A(x) should return same\n np.testing.assert_allclose(testobj.Ao @ testobj.x, testobj.Ao(testobj.x), rtol=5e-5)\n\n\ndef test_adj_composition(testobj):\n Ao = testobj.Ao\n Bo = testobj.Bo\n A = testobj.A\n B = testobj.B\n x = testobj.x\n\n comp_mat = A.conj().T @ B\n a = Ao.conj().T @ Bo\n b = Ao.adj(Bo)\n assert a.input_dtype == testobj.A.dtype\n assert b.input_dtype == testobj.A.dtype\n np.testing.assert_allclose(comp_mat @ x, a @ x, rtol=5e-5)\n np.testing.assert_allclose(comp_mat @ x, b @ x, rtol=5e-5)\n\n\ndef test_transpose_matvec(testobj):\n Ao = testobj.Ao\n y = testobj.y\n\n a = Ao.T @ y\n b = y.T @ Ao\n\n comp_mat = testobj.A.T @ y\n\n assert a.dtype == testobj.A.dtype\n assert b.dtype == testobj.A.dtype\n np.testing.assert_allclose(a, comp_mat, rtol=5e-5)\n np.testing.assert_allclose(a, b, rtol=5e-5)\n\n\ndef test_transpose_matmul(testobj):\n Ao = testobj.Ao\n Bo = testobj.Bo\n x = testobj.x\n comp_op = Ao.T @ Bo\n comp_mat = testobj.A.T @ testobj.B\n assert comp_op.input_dtype == testobj.A.dtype\n np.testing.assert_allclose(comp_mat @ x, comp_op @ x, rtol=5e-5)\n\n\ndef test_conj_transpose_matmul(testobj):\n Ao = testobj.Ao\n Bo = testobj.Bo\n x = testobj.x\n comp_op = Ao.conj().T @ Bo\n comp_mat = testobj.A.conj().T @ testobj.B\n assert comp_mat == testobj.A.dtype\n np.testing.assert_allclose(comp_mat @ x, 
comp_op @ x, rtol=5e-5)\n\n\ndef test_conj_matvec(testobj):\n Ao = testobj.Ao\n x = testobj.x\n a = Ao.conj() @ x\n comp_mat = testobj.A.conj() @ x\n assert a.dtype == testobj.A.dtype\n np.testing.assert_allclose(a, comp_mat, rtol=5e-5)\n\n\ndef test_adjoint_matvec(testobj):\n Ao = testobj.Ao\n y = testobj.y\n\n a = Ao.adj(y)\n b = Ao.conj().T @ y\n c = (y.conj().T @ Ao).conj()\n\n comp_mat = testobj.A.conj().T @ y\n\n assert a.dtype == testobj.A.dtype\n assert b.dtype == testobj.A.dtype\n assert c.dtype == testobj.A.dtype\n np.testing.assert_allclose(a, comp_mat, rtol=5e-5)\n np.testing.assert_allclose(a, b, rtol=5e-5)\n np.testing.assert_allclose(a, c, rtol=5e-5)\n\n\ndef test_adjoint_matmul(testobj):\n # shape mismatch\n Ao = testobj.Ao\n Co = testobj.Co\n\n with pytest.raises(ValueError):\n Ao.adj(Co)\n\n\ndef test_hermitian(testobj):\n Ao = testobj.Ao\n y = testobj.y\n\n np.testing.assert_allclose(Ao.conj().T @ y, Ao.H @ y)\n\n\ndef test_shape(testobj):\n Ao = testobj.Ao\n x = testobj.x\n y = testobj.y\n\n with pytest.raises(ValueError):\n Ao @ y\n\n with pytest.raises(ValueError):\n Ao(y)\n\n with pytest.raises(ValueError):\n Ao.T @ x\n\n with pytest.raises(ValueError):\n Ao.adj(x)\n\n\nclass TestDiagonal:\n def setup_method(self, method):\n self.key = jax.random.PRNGKey(12345)\n\n input_shapes = [(32,), (32, 48), ((3,), (4, 5))]\n\n @pytest.mark.parametrize(\"diagonal_dtype\", [np.float32, np.complex64])\n @pytest.mark.parametrize(\"input_shape\", input_shapes)\n def test_eval(self, input_shape, diagonal_dtype):\n diagonal, key = randn(input_shape, dtype=diagonal_dtype, key=self.key)\n x, key = randn(input_shape, dtype=diagonal_dtype, key=key)\n\n D = linop.Diagonal(diagonal=diagonal)\n assert (D @ x).shape == D.output_shape\n np.testing.assert_allclose((diagonal * x).ravel(), (D @ x).ravel(), rtol=1e-5)\n\n @pytest.mark.parametrize(\"diagonal_dtype\", [np.float32, np.complex64])\n @pytest.mark.parametrize(\"input_shape\", input_shapes)\n def test_adjoint(self, input_shape, diagonal_dtype):\n diagonal, key = randn(input_shape, dtype=diagonal_dtype, key=self.key)\n D = linop.Diagonal(diagonal=diagonal)\n\n adjoint_AtA_test(D)\n adjoint_AAt_test(D)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub])\n @pytest.mark.parametrize(\"diagonal_dtype\", [np.float32, np.complex64])\n @pytest.mark.parametrize(\"input_shape1\", input_shapes)\n @pytest.mark.parametrize(\"input_shape2\", input_shapes)\n def test_binary_op(self, input_shape1, input_shape2, diagonal_dtype, operator):\n\n diagonal1, key = randn(input_shape1, dtype=diagonal_dtype, key=self.key)\n diagonal2, key = randn(input_shape2, dtype=diagonal_dtype, key=key)\n x, key = randn(input_shape1, dtype=diagonal_dtype, key=key)\n\n D1 = linop.Diagonal(diagonal=diagonal1)\n D2 = linop.Diagonal(diagonal=diagonal2)\n\n if input_shape1 != input_shape2:\n with pytest.raises(ValueError):\n a = operator(D1, D2) @ x\n else:\n a = operator(D1, D2) @ x\n Dnew = linop.Diagonal(operator(diagonal1, diagonal2))\n b = Dnew @ x\n np.testing.assert_allclose(a.ravel(), b.ravel(), rtol=1e-5)\n\n @pytest.mark.parametrize(\"operator\", [op.add, op.sub])\n def test_binary_op_mismatch(self, operator):\n diagonal_dtype = np.float32\n input_shape1 = (32,)\n input_shape2 = (48,)\n diagonal1, key = randn(input_shape1, dtype=diagonal_dtype, key=self.key)\n diagonal2, key = randn(input_shape2, dtype=diagonal_dtype, key=key)\n\n D1 = linop.Diagonal(diagonal=diagonal1)\n D2 = linop.Diagonal(diagonal=diagonal2)\n with pytest.raises(ValueError):\n operator(D1, 
D2)\n\n @pytest.mark.parametrize(\"operator\", [op.mul, op.truediv])\n def test_scalar_right(self, operator):\n if operator == op.truediv:\n pytest.xfail(\"scalar / LinearOperator is not supported\")\n\n diagonal_dtype = np.float32\n input_shape = (32,)\n\n diagonal1, key = randn(input_shape, dtype=diagonal_dtype, key=self.key)\n scalar = np.random.randn()\n x, key = randn(input_shape, dtype=diagonal_dtype, key=key)\n\n D = linop.Diagonal(diagonal=diagonal1)\n scaled_D = operator(scalar, D)\n\n np.testing.assert_allclose(scaled_D @ x, operator(scalar, D @ x), rtol=5e-5)\n\n @pytest.mark.parametrize(\"operator\", [op.mul, op.truediv])\n def test_scalar_left(self, operator):\n diagonal_dtype = np.float32\n input_shape = (32,)\n\n diagonal1, key = randn(input_shape, dtype=diagonal_dtype, key=self.key)\n scalar = np.random.randn()\n x, key = randn(input_shape, dtype=diagonal_dtype, key=key)\n\n D = linop.Diagonal(diagonal=diagonal1)\n scaled_D = operator(D, scalar)\n\n np.testing.assert_allclose(scaled_D @ x, operator(D @ x, scalar), rtol=5e-5)\n\n\ndef test_adj_lazy():\n dtype = np.float32\n M, N = (32, 64)\n A, key = randn((M, N), dtype=np.float32, key=None)\n y, key = randn((M,), dtype=np.float32, key=key)\n Ao = AbsMatOp(A, adj_fn=None) # defer setting the linop\n\n assert Ao._adj is None\n a = Ao.adj(y) # Adjoint is set when .adj() is called\n b = A.T @ y\n np.testing.assert_allclose(a, b, rtol=1e-5)\n\n\ndef test_jit_adj_lazy():\n dtype = np.float32\n M, N = (32, 64)\n A, key = randn((M, N), dtype=np.float32, key=None)\n y, key = randn((M,), dtype=np.float32, key=key)\n Ao = AbsMatOp(A, adj_fn=None) # defer setting the linop\n assert Ao._adj is None\n Ao.jit() # Adjoint set here\n assert Ao._adj is not None\n a = Ao.adj(y)\n b = A.T @ y\n np.testing.assert_allclose(a, b, rtol=1e-5)\n\n\nclass PowerIterTestObj:\n def __init__(self, dtype):\n M, N = (8, 8)\n key = jax.random.PRNGKey(12345)\n self.dtype = dtype\n\n A, key = randn((M, N), dtype=dtype, key=key)\n self.A = A.conj().T @ A # ensure symmetric\n\n self.Ao = linop.MatrixOperator(self.A)\n self.Bo = AbsMatOp(self.A)\n\n self.key = key\n self.ev = snp.linalg.norm(\n self.A, 2\n ) # The largest eigenvalue of A is the spectral norm of A\n\n\n@pytest.fixture(scope=\"module\", params=[np.float32, np.complex64])\ndef pitestobj(request):\n yield PowerIterTestObj(request.param)\n\n\ndef test_power_iteration(pitestobj):\n \"\"\"Verify that power iteration calculates largest eigenvalue for real and complex\n symmetric matrices.\n \"\"\"\n # Test using the LinearOperator MatrixOperator\n mu, v = linop.power_iteration(A=pitestobj.Ao, maxiter=100, key=pitestobj.key)\n assert np.abs(mu - pitestobj.ev) < 1e-4\n\n # Test using the AbsMatOp for test_linop.py\n mu, v = linop.power_iteration(A=pitestobj.Bo, maxiter=100, key=pitestobj.key)\n assert np.abs(mu - pitestobj.ev) < 1e-4\n\n\nclass SumTestObj:\n def __init__(self, dtype):\n self.x, key = randn((4, 5, 6, 7), dtype=dtype)\n\n\n@pytest.fixture(scope=\"module\", params=[np.float32, np.complex64])\ndef sumtestobj(request):\n yield SumTestObj(request.param)\n\n\nsum_axis = [\n None,\n 0,\n 1,\n 2,\n 3,\n (0, 1),\n (0, 2),\n (0, 3),\n (1, 2),\n (1, 3),\n (2, 3),\n (0, 1, 2),\n (0, 1, 3),\n (1, 2, 3),\n (0, 1, 2, 3),\n]\n\n\n@pytest.mark.parametrize(\"axis\", sum_axis)\ndef test_sum_eval(sumtestobj, axis):\n x = sumtestobj.x\n\n A = linop.Sum(input_shape=x.shape, input_dtype=x.dtype, sum_axis=axis)\n\n np.testing.assert_allclose(A @ x, snp.sum(x, axis=axis), 
rtol=1e-3)\n\n\n@pytest.mark.parametrize(\"axis\", sum_axis)\ndef test_sum_adj(sumtestobj, axis):\n x = sumtestobj.x\n A = linop.Sum(input_shape=x.shape, input_dtype=x.dtype, sum_axis=axis)\n adjoint_AtA_test(A)\n adjoint_AAt_test(A)\n\n\n@pytest.mark.parametrize(\"axis\", (5, (1, 1), (0, 1, 2, 3, 4)))\ndef test_sum_bad_shapes(sumtestobj, axis):\n # integer too high, repeated values, list too long\n x = sumtestobj.x\n with pytest.raises(ValueError):\n A = linop.Sum(input_shape=x.shape, input_dtype=x.dtype, sum_axis=axis)\n", "sub_path": "scico/test/linop/test_linop.py", "file_name": "test_linop.py", "file_ext": "py", "file_size_in_byte": 17179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "jax.config.config.update", "line_number": 10, "usage_type": "call"}, {"api_name": "jax.config.config", "line_number": 10, "usage_type": "name"}, {"api_name": "scico.linop.LinearOperator", "line_number": 21, "usage_type": "attribute"}, {"api_name": "scico.linop", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 21, "usage_type": "name"}, {"api_name": "scico.typing.PRNGKey", "line_number": 21, "usage_type": "name"}, {"api_name": "scico.random.randn", "line_number": 34, "usage_type": "call"}, {"api_name": "scico.numpy.sum", "line_number": 39, "usage_type": "call"}, {"api_name": "scico.numpy", "line_number": 39, "usage_type": "name"}, {"api_name": "scico.numpy.linalg.norm", "line_number": 40, "usage_type": "call"}, {"api_name": "scico.numpy.linalg", "line_number": 40, "usage_type": "attribute"}, {"api_name": "scico.numpy", "line_number": 40, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 41, "usage_type": "attribute"}, {"api_name": "scico.numpy.sum", "line_number": 44, "usage_type": "call"}, {"api_name": "scico.numpy", "line_number": 44, "usage_type": "name"}, {"api_name": "scico.numpy.linalg.norm", "line_number": 45, "usage_type": "call"}, {"api_name": "scico.numpy.linalg", "line_number": 45, "usage_type": "attribute"}, {"api_name": "scico.numpy", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 46, "usage_type": "attribute"}, {"api_name": "scico.numpy.sum", "line_number": 49, "usage_type": "call"}, {"api_name": "scico.numpy", "line_number": 49, "usage_type": "name"}, {"api_name": "scico.numpy.linalg.norm", "line_number": 50, "usage_type": "call"}, {"api_name": "scico.numpy.linalg", "line_number": 50, "usage_type": "attribute"}, {"api_name": "scico.numpy", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 51, "usage_type": "attribute"}, {"api_name": "scico.linop.LinearOperator", "line_number": 54, "usage_type": "attribute"}, {"api_name": "scico.linop", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 54, "usage_type": "name"}, {"api_name": "scico.typing.PRNGKey", "line_number": 54, "usage_type": "name"}, {"api_name": "scico.random.randn", "line_number": 66, "usage_type": "call"}, {"api_name": "scico.numpy.sum", "line_number": 70, "usage_type": "call"}, {"api_name": "scico.numpy", "line_number": 70, "usage_type": "name"}, {"api_name": "scico.numpy.linalg.norm", "line_number": 71, "usage_type": 
"call"}, {"api_name": "scico.numpy.linalg", "line_number": 71, "usage_type": "attribute"}, {"api_name": "scico.numpy", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 72, "usage_type": "attribute"}, {"api_name": "scico.numpy.sum", "line_number": 76, "usage_type": "call"}, {"api_name": "scico.numpy", "line_number": 76, "usage_type": "name"}, {"api_name": "scico.numpy.linalg.norm", "line_number": 77, "usage_type": "call"}, {"api_name": "scico.numpy.linalg", "line_number": 77, "usage_type": "attribute"}, {"api_name": "scico.numpy", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 78, "usage_type": "attribute"}, {"api_name": "scico.numpy.sum", "line_number": 82, "usage_type": "call"}, {"api_name": "scico.numpy", "line_number": 82, "usage_type": "name"}, {"api_name": "scico.numpy.linalg.norm", "line_number": 83, "usage_type": "call"}, {"api_name": "scico.numpy.linalg", "line_number": 83, "usage_type": "attribute"}, {"api_name": "scico.numpy", "line_number": 83, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 84, "usage_type": "attribute"}, {"api_name": "scico.linop.LinearOperator", "line_number": 87, "usage_type": "attribute"}, {"api_name": "scico.linop", "line_number": 87, "usage_type": "name"}, {"api_name": "jax.random.PRNGKey", "line_number": 107, "usage_type": "call"}, {"api_name": "jax.random", "line_number": 107, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 110, "usage_type": "call"}, {"api_name": "scico.random.randn", "line_number": 111, "usage_type": "call"}, {"api_name": "scico.random.randn", "line_number": 112, "usage_type": "call"}, {"api_name": "scico.random.randn", "line_number": 113, "usage_type": "call"}, {"api_name": "scico.random.randn", "line_number": 115, "usage_type": "call"}, {"api_name": "scico.random.randn", "line_number": 116, "usage_type": "call"}, {"api_name": "scico.random.randn", "line_number": 117, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.complex64", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.complex128", "line_number": 125, "usage_type": "attribute"}, {"api_name": "scico.linop.LinearOperator", "line_number": 139, "usage_type": "attribute"}, {"api_name": "scico.linop", "line_number": 139, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 144, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 146, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 130, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 130, "usage_type": "attribute"}, {"api_name": "operator.add", "line_number": 130, "usage_type": "attribute"}, {"api_name": "operator.sub", "line_number": 130, "usage_type": "attribute"}, {"api_name": "scico.linop.LinearOperator", "line_number": 154, "usage_type": "attribute"}, {"api_name": 
"scico.linop", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 156, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 158, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 150, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 150, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 150, "usage_type": "attribute"}, {"api_name": "operator.truediv", "line_number": 150, "usage_type": "attribute"}, {"api_name": "operator.truediv", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pytest.xfail", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 161, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 161, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 161, "usage_type": "attribute"}, {"api_name": "operator.truediv", "line_number": 161, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 181, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 178, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 178, "usage_type": "attribute"}, {"api_name": "operator.add", "line_number": 178, "usage_type": "attribute"}, {"api_name": "operator.sub", "line_number": 178, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 188, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 185, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 185, "usage_type": "attribute"}, {"api_name": "operator.add", "line_number": 185, "usage_type": "attribute"}, {"api_name": "operator.sub", "line_number": 185, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 196, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 203, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 210, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 217, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 228, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 229, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 230, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 235, "usage_type": "call"}, 
{"api_name": "numpy.testing", "line_number": 235, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 250, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 251, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 265, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 266, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 276, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 286, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 295, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 311, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 312, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 313, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 329, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 337, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 340, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 343, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 346, "usage_type": "call"}, {"api_name": "jax.random.PRNGKey", "line_number": 352, "usage_type": "call"}, {"api_name": "jax.random", "line_number": 352, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 359, "usage_type": "call"}, {"api_name": "scico.random.randn", "line_number": 360, "usage_type": "call"}, {"api_name": "scico.linop.Diagonal", "line_number": 362, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 362, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 364, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 364, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 356, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 356, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 356, "usage_type": "attribute"}, {"api_name": "numpy.complex64", "line_number": 356, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 357, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 357, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 369, "usage_type": "call"}, {"api_name": "scico.linop.Diagonal", "line_number": 370, "usage_type": "call"}, {"api_name": 
"scico.linop", "line_number": 370, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 366, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 366, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 366, "usage_type": "attribute"}, {"api_name": "numpy.complex64", "line_number": 366, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 367, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 367, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 381, "usage_type": "call"}, {"api_name": "scico.random.randn", "line_number": 382, "usage_type": "call"}, {"api_name": "scico.random.randn", "line_number": 383, "usage_type": "call"}, {"api_name": "scico.linop.Diagonal", "line_number": 385, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 385, "usage_type": "name"}, {"api_name": "scico.linop.Diagonal", "line_number": 386, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 386, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 389, "usage_type": "call"}, {"api_name": "scico.linop.Diagonal", "line_number": 393, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 393, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 395, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 375, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 375, "usage_type": "attribute"}, {"api_name": "operator.add", "line_number": 375, "usage_type": "attribute"}, {"api_name": "operator.sub", "line_number": 375, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 376, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 376, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 376, "usage_type": "attribute"}, {"api_name": "numpy.complex64", "line_number": 376, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 377, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 377, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 378, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 378, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 399, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 402, "usage_type": "call"}, {"api_name": "scico.random.randn", "line_number": 403, "usage_type": "call"}, {"api_name": "scico.linop.Diagonal", "line_number": 405, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 405, "usage_type": "name"}, {"api_name": "scico.linop.Diagonal", "line_number": 406, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 406, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 407, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 397, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 397, "usage_type": "attribute"}, {"api_name": "operator.add", "line_number": 397, "usage_type": "attribute"}, {"api_name": "operator.sub", "line_number": 397, "usage_type": "attribute"}, {"api_name": "operator.truediv", "line_number": 412, "usage_type": "attribute"}, {"api_name": "pytest.xfail", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 415, "usage_type": 
"attribute"}, {"api_name": "scico.random.randn", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 419, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 420, "usage_type": "call"}, {"api_name": "scico.linop.Diagonal", "line_number": 422, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 422, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 425, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 425, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 410, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 410, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 410, "usage_type": "attribute"}, {"api_name": "operator.truediv", "line_number": 410, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 429, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 432, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 433, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 434, "usage_type": "call"}, {"api_name": "scico.linop.Diagonal", "line_number": 436, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 436, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 439, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 427, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 427, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 427, "usage_type": "attribute"}, {"api_name": "operator.truediv", "line_number": 427, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 443, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 445, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 445, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 446, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 452, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 452, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 456, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 458, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 458, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 459, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 459, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 466, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 466, "usage_type": "attribute"}, {"api_name": "jax.random.PRNGKey", "line_number": 472, "usage_type": "call"}, {"api_name": "jax.random", "line_number": 472, "usage_type": "attribute"}, {"api_name": "scico.random.randn", "line_number": 475, "usage_type": "call"}, {"api_name": "scico.linop.MatrixOperator", "line_number": 478, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 478, "usage_type": "name"}, {"api_name": "scico.numpy.linalg.norm", "line_number": 482, 
"usage_type": "call"}, {"api_name": "scico.numpy.linalg", "line_number": 482, "usage_type": "attribute"}, {"api_name": "scico.numpy", "line_number": 482, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 487, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 487, "usage_type": "attribute"}, {"api_name": "numpy.complex64", "line_number": 487, "usage_type": "attribute"}, {"api_name": "scico.linop.power_iteration", "line_number": 497, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 497, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 498, "usage_type": "call"}, {"api_name": "scico.linop.power_iteration", "line_number": 501, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 501, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 502, "usage_type": "call"}, {"api_name": "scico.random.randn", "line_number": 507, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 510, "usage_type": "attribute"}, {"api_name": "numpy.complex64", "line_number": 510, "usage_type": "attribute"}, {"api_name": "scico.linop.Sum", "line_number": 538, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 538, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 540, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 540, "usage_type": "attribute"}, {"api_name": "scico.numpy.sum", "line_number": 540, "usage_type": "call"}, {"api_name": "scico.numpy", "line_number": 540, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 534, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 534, "usage_type": "attribute"}, {"api_name": "scico.linop.Sum", "line_number": 546, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 546, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 543, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 543, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 555, "usage_type": "call"}, {"api_name": "scico.linop.Sum", "line_number": 556, "usage_type": "call"}, {"api_name": "scico.linop", "line_number": 556, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 551, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 551, "usage_type": "attribute"}]} +{"seq_id": "467003955", "text": "import numpy as np\nimport cv2 as cv\nfrom frame import images_normalize, frames_down_up_sample, images_random_crop, images_rescale, image_resize_aspectratio\nfrom PIL import Image\n\nnp.set_printoptions(threshold=np.inf)\nresize_min_dim = 256\n\ntest_path = \"Videos/v_CricketShot_g04_c01.avi\"\n\n\"\"\" 画像を正規化し, npy形式で保存 \"\"\"\ndef main():\n video = cv.VideoCapture(test_path)\n frame_count = int(video.get(cv.CAP_PROP_FRAME_COUNT))\n fps = video.get(cv.CAP_PROP_FPS)\n print(\"フレーム総数: \" + str(frame_count))\n print(\"fps: \" + str(fps))\n fourcc = cv.VideoWriter_fourcc('m','p','4', 'v')\n fc = 0\n add_dimension_list = []\n\n while(1):\n ret, frame = video.read()\n if ret == False:\n break\n frame_resized = image_resize_aspectratio(frame, resize_min_dim)\n\n if fc == 0:\n frame_height, frame_width, _ = frame_resized.shape\n buf = np.empty((frame_count, frame_height, frame_width, 3), np.dtype('uint8'))\n\n #BGRからRGBに変更\n rgb = cv.cvtColor(frame_resized, cv.COLOR_BGR2RGB)\n buf[fc] = rgb\n fc += 1\n\n #frames = images_normalize(buf, 
79, 224, 224)\n #gif_frames = (frames + 1)/2\n #Image.fromarray(gif_frames).save('test.jpg')\n pil_gif_frames = []\n for i in range(buf.shape[0]):\n #for i in range(gif_frames.shape[0]):\n pil_gif_frames.append(Image.fromarray(np.uint8(buf[i])))\n #print(pil_gif_frames[0])\n pil_gif_frames[0].save('test_rgb.gif', save_all=True, append_images=pil_gif_frames[1:], optimize=False, duration=40, loop=0)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "I3D_PreprocessTest/make_gif_rgb.py", "file_name": "make_gif_rgb.py", "file_ext": "py", "file_size_in_byte": 1535, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.set_printoptions", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 18, "usage_type": "call"}, {"api_name": "frame.image_resize_aspectratio", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 33, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 43, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "638165447", "text": "\n# coding: utf-8\n\n# In[5]:\n\n\nimport os\nimport numpy as np\nimport tools\n\n\n# In[6]:\n\n\ndef make_front_and_back_examples(in_dir, out_dir):\n \"\"\"Generate front-view and back-view image matrices for each sample\n \"\"\"\n for f in os.listdir(in_dir):\n fp = os.path.join(in_dir, f)\n fn = f.split('.')[0]\n \n data = tools.read_data(fp)\n np.save(os.path.join(out_dir, fn + '_front.npy'), np.rot90(data[:,:,0]))\n np.save(os.path.join(out_dir, fn + '_back.npy'), np.rot90(data[:,:,31]))\n\n\n# In[21]:\n\n\ndef make_region_examples(in_dir, out_dir, x1, x2, y1, y2):\n \"\"\"Generate region samples; the input is the front/back images produced by make_front_and_back_examples\n \"\"\"\n for f in os.listdir(in_dir):\n data = np.load(os.path.join(in_dir, f))\n if 'back' in f:\n # Back images need a horizontal flip so they share the front view's coordinate system\n data = np.fliplr(data)\n np.save(os.path.join(out_dir, f), data[x1:x2,y1:y2])\n\n\n# In[15]:\n\n\ndef make_binary_one_hot_labels(in_file, out_file, zone_num):\n li = []\n with open(in_file, 'r') as f:\n first = True\n for line in f:\n if first:\n first = False\n continue\n line = line.strip()\n tmp, target = line.split(',')\n sid, zone = tmp.split('_')\n if zone != 'Zone' + str(zone_num):\n continue\n if target == '0':\n li.append([1,0])\n else:\n li.append([0,1])\n a = np.array(li)\n np.save(out_file, a)\n\n", "sub_path": "psac/py/sample_maker.py", "file_name": "sample_maker.py", "file_ext": "py", "file_size_in_byte": 1555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.listdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tools.read_data", "line_number": 22, "usage_type": "call"}, {"api_name": 
"numpy.save", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.rot90", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.rot90", "line_number": 24, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.fliplr", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "371511704", "text": "import pandas as pd\nimport numpy as np\nfrom scipy import stats as st\nfrom scipy.special import stdtrit\ndef create_df(path) -> pd.DataFrame:\n return pd.read_csv(path)\n\ndef get_t_stat(df,column,null_value):\n \n return st.ttest_1samp(df[column],null_value) # (t score, prob)\n\ndef get_critical_value(df,p):\n return stdtrit(df,1-p)\n\ndef get_desired_mean(df,column,p,h_null):\n critical_val = get_critical_value(len(df.index) - 1 ,p)\n return critical_val * (df[column].std()/np.sqrt(len(df.index))) + h_null\n\ndef refine(df,column,h_null,threshold,left=True):\n desired_mean = get_desired_mean(df,\"a\",.05,34.5)\n t_star = get_critical_value(len(df.index) - 1,.05)\n cur_t = get_t_stat(df,column,h_null)[0]\n step = desired_mean/100\n n = len(df.index)\n print(f\"t_star={t_star} cur_t={cur_t}\")\n while (left and cur_t > t_star) or (not left and cur_t < t_star):\n cur_t = get_t_stat(df,column,h_null)[0]\n print(f\"cur_t:{cur_t}\")\n row_to_modify = np.random.randint(0,n)\n multiplier = 1\n if cur_t > t_star and left:\n multiplier = -1 \n df.iloc[row_to_modify][column] += step * multiplier\n return df\n \n \nif __name__ == \"__main__\":\n df = pd.DataFrame({\"a\":[32.5,34.5,37.3,35.6,36.5,37,40,32]})\n \n new = refine(df,\"a\",34.5,.05,False)\n print(\"New df:\", new)\n print(get_t_stat(new,\"a\",34.5))", "sub_path": "phack/hack.py", "file_name": "hack.py", "file_ext": "py", "file_size_in_byte": 1397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 5, "usage_type": "attribute"}, {"api_name": "scipy.stats.ttest_1samp", "line_number": 10, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 10, "usage_type": "name"}, {"api_name": "scipy.special.stdtrit", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "236929800", "text": "from flask import 
Flask, render_template, g, url_for, redirect, flash\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import DataRequired\nfrom flask_sqlalchemy import SQLAlchemy\nimport pymysql\npymysql.install_as_MySQLdb()\n\n\napp = Flask(__name__)\n\n# Configuration\nclass Config():\n SECRET_KEY = \"JNDJFNNNFDNF\"\n SQLALCHEMY_DATABASE_URI = \"mysql://root:123456@192.168.35.158:3306/flaskbook\"\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n\napp.config.from_object(Config)\n\n# Database models\ndb = SQLAlchemy(app)\nclass Author(db.Model):\n __tablename__ = \"fb_author\"\n id = db.Column(db.Integer, primary_key=True)\n author = db.Column(db.String(30), unique=True)\n book = db.relation(\"Book\", backref=\"author\")\n\nclass Book(db.Model):\n __tablename__ = \"fb_book\"\n id = db.Column(db.Integer, primary_key=True)\n book = db.Column(db.String(30), unique=True)\n is_del = db.Column(db.Boolean, default=False)\n author_id = db.Column(db.Integer, db.ForeignKey(\"fb_author.id\"))\n\n# Form\nclass BookSubmit(FlaskForm):\n author = StringField(label=\"Author\", validators=[DataRequired(\"Please enter the author name\")])\n book = StringField(label=\"Book\", validators=[DataRequired(\"Please enter the book title\")])\n\n submit = SubmitField(label=\"Submit\")\n\n# Routes\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef index():\n form = BookSubmit()\n if form.validate_on_submit():\n book = form.book.data\n author = form.author.data\n add_book(book, author)\n\n\n book_li = Book.query.filter_by(is_del=False).all()\n books_info = list()\n for book in book_li:\n book_info = {\n \"book_id\": book.id,\n \"book\": book.book,\n \"author\": book.author.author\n }\n books_info.append(book_info)\n\n data = {\n \"form\": form,\n \"books_info\": books_info\n }\n return render_template(\"index.html\", **data)\n\n@app.route(\"/delete/<int:bookid>\")\ndef delete(bookid):\n Book.query.filter_by(id=bookid).update({\"is_del\": True})\n db.session.commit()\n return redirect(url_for(\"index\"))\n\ndef add_book(book, author):\n # Check whether the book already exists\n flag = Book.query.filter_by(book=book).first()\n if flag is None:\n # Not present yet: reuse the author row if there is one (the column is unique), otherwise create it\n current_author = Author.query.filter_by(author=author).first()\n if current_author is None:\n current_author = Author(author=author)\n db.session.add(current_author)\n db.session.commit()\n current_book = Book(book=book, author_id=current_author.id)\n db.session.add(current_book)\n db.session.commit()\n else:\n # Already present: restore it if it was soft-deleted\n if flag.is_del:\n flag.is_del = False\n db.session.add(flag)\n db.session.commit()\n else:\n flash(\"Already exists!\")\n\n\nif __name__ == '__main__':\n # db.drop_all()\n # db.create_all()\n # au_xi = Author(author='我吃西红柿')\n # au_qian = Author(author='萧潜')\n # au_san = Author(author='唐家三少')\n # db.session.add_all([au_xi, au_qian, au_san])\n # db.session.commit()\n #\n # bk_xi = Book(book='吞噬星空', author_id=au_xi.id)\n # bk_xi2 = Book(book='寸芒', author_id=au_qian.id)\n # bk_qian = Book(book='飘渺之旅', author_id=au_qian.id)\n # bk_san = Book(book='冰火魔厨', author_id=au_san.id)\n # db.session.add_all([bk_xi, bk_xi2, bk_qian, bk_san])\n # db.session.commit()\n app.run()\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pymysql.install_as_MySQLdb", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 21, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 36, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 37, 
"usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 37, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 38, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 38, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "323518082", "text": "import math\nimport os\nimport json\nimport numpy as np\n\nfrom config.humanoid_config import HumanoidConfig as c\n\nclass HumanoidPoseInterpolator(object):\n\n def __init__(self):\n pass\n\n def Reset(self,\n basePos=[0, 0, 0],\n baseOrn=[0, 0, 0, 1],\n chestRot=[0, 0, 0, 1],\n neckRot=[0, 0, 0, 1],\n rightHipRot=[0, 0, 0, 1],\n rightKneeRot=[0],\n rightAnkleRot=[0, 0, 0, 1],\n rightShoulderRot=[0, 0, 0, 1],\n rightElbowRot=[0],\n leftHipRot=[0, 0, 0, 1],\n leftKneeRot=[0],\n leftAnkleRot=[0, 0, 0, 1],\n leftShoulderRot=[0, 0, 0, 1],\n leftElbowRot=[0],\n baseLinVel=[0, 0, 0],\n baseAngVel=[0, 0, 0],\n chestVel=[0, 0, 0],\n neckVel=[0, 0, 0],\n rightHipVel=[0, 0, 0],\n rightKneeVel=[0],\n rightAnkleVel=[0, 0, 0],\n rightShoulderVel=[0, 0, 0],\n rightElbowVel=[0],\n leftHipVel=[0, 0, 0],\n leftKneeVel=[0],\n leftAnkleVel=[0, 0, 0],\n leftShoulderVel=[0, 0, 0],\n leftElbowVel=[0]):\n\n self._basePos = basePos\n self._baseLinVel = baseLinVel\n self._baseOrn = baseOrn\n self._baseAngVel = baseAngVel\n\n self._chestRot = chestRot\n self._chestVel = chestVel\n self._neckRot = neckRot\n self._neckVel = neckVel\n\n self._rightHipRot = rightHipRot\n self._rightHipVel = rightHipVel\n self._rightKneeRot = rightKneeRot\n self._rightKneeVel = rightKneeVel\n self._rightAnkleRot = rightAnkleRot\n self._rightAnkleVel = rightAnkleVel\n\n self._rightShoulderRot = rightShoulderRot\n self._rightShoulderVel = rightShoulderVel\n self._rightElbowRot = rightElbowRot\n self._rightElbowVel = rightElbowVel\n\n self._leftHipRot = leftHipRot\n self._leftHipVel = leftHipVel\n self._leftKneeRot = leftKneeRot\n self._leftKneeVel = leftKneeVel\n self._leftAnkleRot = leftAnkleRot\n self._leftAnkleVel = leftAnkleVel\n\n self._leftShoulderRot = leftShoulderRot\n self._leftShoulderVel = leftShoulderVel\n self._leftElbowRot = leftElbowRot\n self._leftElbowVel = leftElbowVel\n\n def ComputeLinVel(self, posStart, posEnd, deltaTime): \n \"\"\" 根据xyz计算线速度 \"\"\"\n vel = [\n (posEnd[0] - posStart[0]) / deltaTime, \n (posEnd[1] - posStart[1]) / deltaTime,\n (posEnd[2] - posStart[2]) / deltaTime\n ]\n return vel\n\n def ComputeAngVel(self, ornStart, ornEnd, deltaTime, bullet_client):\n \"\"\" 用于计算base的角速度 \"\"\"\n dorn = bullet_client.getDifferenceQuaternion(ornStart, ornEnd)\n axis, angle = bullet_client.getAxisAngleFromQuaternion(dorn)\n angVel = [\n (axis[0] * angle) / deltaTime, \n (axis[1] * angle) / deltaTime,\n (axis[2] * angle) / deltaTime\n ]\n return angVel\n\n def ComputeAngVelRel(self, ornStart, ornEnd, deltaTime, bullet_client):\n \"\"\" 用于计算关节的角速度\"\"\"\n ornStartConjugate = [-ornStart[0], -ornStart[1], -ornStart[2], ornStart[3]] # 共轭四元数,几何意义:旋转轴不变,旋转角相反\n pos_diff, q_diff = bullet_client.multiplyTransforms(positionA=[0, 0, 0], orientationA=ornStartConjugate,\n positionB=[0, 0, 0], orientationB=ornEnd)\n axis, angle = 
bullet_client.getAxisAngleFromQuaternion(q_diff)\n angVel = [\n (axis[0] * angle) / deltaTime, \n (axis[1] * angle) / deltaTime,\n (axis[2] * angle) / deltaTime\n ]\n return angVel\n\n def GetStatePosVel(self):\n \"\"\" Return a len=77 list. \"\"\"\n \n state = []\n\n # Base pos orn len=7\n state.extend(self._basePos) # state[0:3]\n state.extend(self._baseOrn) # state[3:7]\n\n # Joint rotation len=8*4+4*1=36\n state.extend(self._chestRot) # state[7:11]\n state.extend(self._neckRot) # state[11:15]\n state.extend(self._rightHipRot) # state[15:19]\n state.extend(self._rightKneeRot) # state[19:20]\n state.extend(self._rightAnkleRot) # state[20:24]\n state.extend(self._rightShoulderRot) # state[24:28]\n state.extend(self._rightElbowRot) # state[28:29]\n state.extend(self._leftHipRot) # state[29:33]\n state.extend(self._leftKneeRot) # state[33:34]\n state.extend(self._leftAnkleRot) # state[34:38]\n state.extend(self._leftShoulderRot) # state[38:42]\n state.extend(self._leftElbowRot) # state[42:43]\n\n # Base lin ang vel len=6\n state.extend(self._baseLinVel) # state[43:46]\n state.extend(self._baseAngVel) # state[46:49]\n\n # Joint ang vel len=8*3+4*1=28\n state.extend(self._chestVel) # state[49:52]\n state.extend(self._neckVel) # state[52:55]\n state.extend(self._rightHipVel) # state[55:58]\n state.extend(self._rightKneeVel) # state[58:59]\n state.extend(self._rightAnkleVel) # state[59:62]\n state.extend(self._rightShoulderVel) # state[62:65]\n state.extend(self._rightElbowVel) # state[65:66]\n state.extend(self._leftHipVel) # state[66:69]\n state.extend(self._leftKneeVel) # state[69:70]\n state.extend(self._leftAnkleVel) # state[70:73]\n state.extend(self._leftShoulderVel) # state[73:76]\n state.extend(self._leftElbowVel) # state[76:77]\n\n return state\n\n def Slerp(self, frameFraction, frameData, frameDataNext, bullet_client):\n \"\"\"\n Args:\n frameFraction -- how far (0..1) the current frame has advanced from the start frame\n frameData, frameDataNext -- raw frames read from the txt file;\n quaternions are ordered x, y, z, w\n \n \"\"\"\n \n self.Reset()\n\n keyFrameDuration = frameData[0]\n\n ##### Base Position\n basePos1Start = [frameData[1], frameData[2], frameData[3]]\n basePos1End = [frameDataNext[1], frameDataNext[2], frameDataNext[3]]\n self._basePos = [\n basePos1Start[0] + frameFraction * (basePos1End[0] - basePos1Start[0]),\n basePos1Start[1] + frameFraction * (basePos1End[1] - basePos1Start[1]),\n basePos1Start[2] + frameFraction * (basePos1End[2] - basePos1Start[2])\n ]\n self._baseLinVel = self.ComputeLinVel(basePos1Start, basePos1End, keyFrameDuration)\n \n ##### Base Orientation\n baseOrn1Start = [frameData[4], frameData[5], frameData[6], frameData[7]]\n baseOrn1Next = [frameDataNext[4], frameDataNext[5], frameDataNext[6], frameDataNext[7]]\n self._baseOrn = bullet_client.getQuaternionSlerp(baseOrn1Start, baseOrn1Next, frameFraction)\n self._baseAngVel = self.ComputeAngVel(baseOrn1Start, baseOrn1Next, keyFrameDuration, bullet_client)\n\n ##### Chest\n chestRotStart = [frameData[8], frameData[9], frameData[10], frameData[11]]\n chestRotEnd = [frameDataNext[8], frameDataNext[9], frameDataNext[10], frameDataNext[11]]\n self._chestRot = bullet_client.getQuaternionSlerp(chestRotStart, chestRotEnd, frameFraction)\n self._chestVel = self.ComputeAngVelRel(chestRotStart, chestRotEnd, keyFrameDuration, bullet_client)\n\n
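# The remaining spherical joints repeat the chest pattern: slerp the two keyframe quaternions, then turn the relative rotation over one keyframe duration into an angular velocity.\n ##### Neck\n neckRotStart = [frameData[12], frameData[13], frameData[14], frameData[15]]\n neckRotEnd = [frameDataNext[12], frameDataNext[13], frameDataNext[14], frameDataNext[15]]\n self._neckRot = 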
bullet_client.getQuaternionSlerp(neckRotStart, neckRotEnd, frameFraction)\n self._neckVel = self.ComputeAngVelRel(neckRotStart, neckRotEnd, keyFrameDuration, bullet_client)\n\n ##### Right Hip\n rightHipRotStart = [frameData[16], frameData[17], frameData[18], frameData[19]]\n rightHipRotEnd = [frameDataNext[16], frameDataNext[17], frameDataNext[18], frameDataNext[19]]\n self._rightHipRot = bullet_client.getQuaternionSlerp(rightHipRotStart, rightHipRotEnd, frameFraction)\n self._rightHipVel = self.ComputeAngVelRel(rightHipRotStart, rightHipRotEnd, keyFrameDuration, bullet_client)\n\n ##### Right Knee\n rightKneeRotStart = [frameData[20]]\n rightKneeRotEnd = [frameDataNext[20]]\n self._rightKneeRot = [\n rightKneeRotStart[0] + frameFraction * (rightKneeRotEnd[0] - rightKneeRotStart[0])\n ]\n self._rightKneeVel = [(rightKneeRotEnd[0] - rightKneeRotStart[0]) / keyFrameDuration]\n\n ##### Right Ankle\n rightAnkleRotStart = [frameData[21], frameData[22], frameData[23], frameData[24]]\n rightAnkleRotEnd = [frameDataNext[21], frameDataNext[22], frameDataNext[23], frameDataNext[24]]\n self._rightAnkleRot = bullet_client.getQuaternionSlerp(rightAnkleRotStart, rightAnkleRotEnd, frameFraction)\n self._rightAnkleVel = self.ComputeAngVelRel(rightAnkleRotStart, rightAnkleRotEnd, keyFrameDuration, bullet_client)\n\n ##### Right Shoulder\n rightShoulderRotStart = [frameData[25], frameData[26], frameData[27], frameData[28]]\n rightShoulderRotEnd = [frameDataNext[25], frameDataNext[26], frameDataNext[27], frameDataNext[28]]\n self._rightShoulderRot = bullet_client.getQuaternionSlerp(rightShoulderRotStart, rightShoulderRotEnd, frameFraction)\n self._rightShoulderVel = self.ComputeAngVelRel(rightShoulderRotStart, rightShoulderRotEnd, keyFrameDuration, bullet_client)\n\n ##### Right Elbow\n rightElbowRotStart = [frameData[29]]\n rightElbowRotEnd = [frameDataNext[29]]\n self._rightElbowRot = [\n rightElbowRotStart[0] + frameFraction * (rightElbowRotEnd[0] - rightElbowRotStart[0])\n ]\n self._rightElbowVel = [(rightElbowRotEnd[0] - rightElbowRotStart[0]) / keyFrameDuration]\n\n ##### Left Hip\n leftHipRotStart = [frameData[30], frameData[31], frameData[32], frameData[33]]\n leftHipRotEnd = [frameDataNext[30], frameDataNext[31], frameDataNext[32], frameDataNext[33]]\n self._leftHipRot = bullet_client.getQuaternionSlerp(leftHipRotStart, leftHipRotEnd, frameFraction)\n self._leftHipVel = self.ComputeAngVelRel(leftHipRotStart, leftHipRotEnd, keyFrameDuration, bullet_client)\n\n ##### Left Knee\n leftKneeRotStart = [frameData[34]]\n leftKneeRotEnd = [frameDataNext[34]]\n self._leftKneeRot = [\n leftKneeRotStart[0] + frameFraction * (leftKneeRotEnd[0] - leftKneeRotStart[0])\n ]\n self._leftKneeVel = [(leftKneeRotEnd[0] - leftKneeRotStart[0]) / keyFrameDuration]\n\n ##### Left Ankle\n leftAnkleRotStart = [frameData[35], frameData[36], frameData[37], frameData[38]]\n leftAnkleRotEnd = [frameDataNext[35], frameDataNext[36], frameDataNext[37], frameDataNext[38]]\n self._leftAnkleRot = bullet_client.getQuaternionSlerp(leftAnkleRotStart, leftAnkleRotEnd, frameFraction)\n self._leftAnkleVel = self.ComputeAngVelRel(leftAnkleRotStart, leftAnkleRotEnd, keyFrameDuration, bullet_client)\n\n ##### Left Shoulder\n leftShoulderRotStart = [frameData[39], frameData[40], frameData[41], frameData[42]]\n leftShoulderRotEnd = [frameDataNext[39], frameDataNext[40], frameDataNext[41], frameDataNext[42]]\n self._leftShoulderRot = bullet_client.getQuaternionSlerp(leftShoulderRotStart, leftShoulderRotEnd, frameFraction)\n self._leftShoulderVel 
= self.ComputeAngVelRel(leftShoulderRotStart, leftShoulderRotEnd, keyFrameDuration, bullet_client)\n\n        ##### Left Elbow\n        leftElbowRotStart = [frameData[43]]\n        leftElbowRotEnd = [frameDataNext[43]]\n        self._leftElbowRot = [\n            leftElbowRotStart[0] + frameFraction * (leftElbowRotEnd[0] - leftElbowRotStart[0])\n        ]\n        self._leftElbowVel = [(leftElbowRotEnd[0] - leftElbowRotStart[0]) / keyFrameDuration]\n\n        state = self.GetStatePosVel()\n        return state\n\n\nclass PybulletMocapData():\n    \"\"\"Only for the txt mocap datasets shipped with pybullet\"\"\"\n\n    def __init__(self, path, pybullet_client):\n        # Load the dataset\n        assert os.path.exists(path)\n        self._path = path\n        self._mocap_data = []\n        with open(path, 'r') as f:\n            self._mocap_data = json.load(f)\n        \n        # Post-process the dataset: reorder quaternions from w,x,y,z to x,y,z,w\n        self.postProcess()\n\n        self._pb_client = pybullet_client\n        self._poseInterpolator = HumanoidPoseInterpolator()\n    \n    def DataPath(self):\n        return self._path\n\n    def NumFrames(self):\n        return len(self._mocap_data['Frames'])\n    \n    def KeyFrameDuration(self):\n        return self._mocap_data['Frames'][0][0]\n\n    def postProcess(self):\n        \"\"\" Reorder the quaternion components \"\"\"\n        numFrames = self.NumFrames()\n        for i in range(numFrames):\n            oldFrameData = self._mocap_data['Frames'][i]\n            newFrameData = []\n\n            # keyFrameDuration\n            newFrameData.extend([oldFrameData[0]])\n            # base position\n            newFrameData.extend([oldFrameData[1], oldFrameData[2], oldFrameData[3]])\n            # base orientation\n            newFrameData.extend([oldFrameData[5], oldFrameData[6], oldFrameData[7], oldFrameData[4]])\n            # 12 joints\n            newFrameData.extend([oldFrameData[9], oldFrameData[10], oldFrameData[11], oldFrameData[8]])\n            newFrameData.extend([oldFrameData[13], oldFrameData[14], oldFrameData[15], oldFrameData[12]])\n            newFrameData.extend([oldFrameData[17], oldFrameData[18], oldFrameData[19], oldFrameData[16]])\n            newFrameData.extend([oldFrameData[20]])\n            newFrameData.extend([oldFrameData[22], oldFrameData[23], oldFrameData[24], oldFrameData[21]])\n            newFrameData.extend([oldFrameData[26], oldFrameData[27], oldFrameData[28], oldFrameData[25]])\n            newFrameData.extend([oldFrameData[29]])\n            newFrameData.extend([oldFrameData[31], oldFrameData[32], oldFrameData[33], oldFrameData[30]])\n            newFrameData.extend([oldFrameData[34]])\n            newFrameData.extend([oldFrameData[36], oldFrameData[37], oldFrameData[38], oldFrameData[35]])\n            newFrameData.extend([oldFrameData[40], oldFrameData[41], oldFrameData[42], oldFrameData[39]])\n            newFrameData.extend([oldFrameData[43]])\n\n            self._mocap_data['Frames'][i] = newFrameData\n\n    def getCycleTime(self):\n        \"\"\" Compute the duration of the motion clip, in seconds \"\"\"\n        keyFrameDuration = self.KeyFrameDuration()\n        cycleTime = keyFrameDuration * (self.NumFrames() - 1)\n        return cycleTime\n\n    def calcCycleCount(self, curTime, cycleTime):\n        \"\"\" Compute the number of completed motion cycles\n        \n        Args:\n            curTime -- current simulation time\n            cycleTime -- duration of the motion clip (the time one cycle takes)\n        Return:\n            count -- cycle count; returns 0 while the simulation time is below one full cycle\n\n        \"\"\"\n        phases = curTime / cycleTime\n        count = math.floor(phases)\n        return count\n\n    def computeCycleOffset(self):\n        \"\"\" Compute the basePosition (xyz) offset between the first and last frames of the dataset \"\"\"\n        firstFrame = 0\n        lastFrame = self.NumFrames() - 1\n        frameData = self._mocap_data['Frames'][firstFrame]\n        frameDataNext = self._mocap_data['Frames'][lastFrame]\n\n        basePosStart = [frameData[1], frameData[2], frameData[3]]\n        basePosEnd = [frameDataNext[1], frameDataNext[2], frameDataNext[3]]\n        cycleOffset = [\n            basePosEnd[0] - basePosStart[0], \n            basePosEnd[1] - basePosStart[1],\n            basePosEnd[2] - basePosStart[2]\n        ]\n        return cycleOffset\n\n\n    def getSpecTimeState(self, t):\n        \"\"\" Get the state at time t, including velocities \"\"\"\n        \n        
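# Worked example (illustrative values only, not taken from the dataset): with\n        # keyFrameDuration = 0.0625 s and 33 frames, cycleTime = 0.0625 * 32 = 2.0 s;\n        # querying t = 2.1 s gives cycleCount = 1, frameTime = 0.1 s, frame = 1,\n        # frameNext = 2 and frameFraction = (0.1 - 0.0625) / 0.0625 = 0.6.\n        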
# Convert the time t into frame, frameNext, frameFraction\n        curTime = t\n        keyFrameDuration = self.KeyFrameDuration()\n        cycleTime = self.getCycleTime()\n        cycleCount = self.calcCycleCount(curTime, cycleTime)\n        frameTime = curTime - cycleCount * cycleTime\n        if frameTime < 0:\n            frameTime += cycleTime\n        \n        frame = int(frameTime / keyFrameDuration)\n        frameNext = frame + 1\n        if frameNext >= self.NumFrames():\n            frameNext = frame\n        frameFraction = (frameTime - frame * keyFrameDuration) / keyFrameDuration\n\n        # Fetch the two key frames and interpolate between them\n        frameData = self._mocap_data['Frames'][frame]\n        frameDataNext = self._mocap_data['Frames'][frameNext]\n\n        self._poseInterpolator.Slerp(frameFraction, frameData, frameDataNext, self._pb_client)\n\n        cycleOffset = self.computeCycleOffset()\n        oldPos = self._poseInterpolator._basePos\n        self._poseInterpolator._basePos = [\n            oldPos[0] + cycleCount * cycleOffset[0],\n            oldPos[1] + cycleCount * cycleOffset[1],\n            oldPos[2] + cycleCount * cycleOffset[2]\n        ] \n\n        return self._poseInterpolator.GetStatePosVel()\n", "sub_path": "src/samcon/mocapdata.py", "file_name": "mocapdata.py", "file_ext": "py", "file_size_in_byte": 16362, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.exists", "line_number": 271, "usage_type": "call"}, {"api_name": "os.path", "line_number": 271, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 275, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 338, "usage_type": "call"}]} +{"seq_id": "33500446", "text": "from django.urls import re_path\nfrom . import views\n\n# levels share a url, views.play handles which level is shown\nurlpatterns = [\n    re_path(r'^title/?$', views.title, name='title'),\n    re_path(r'^title/login/?$', views.login, name='login'),\n    re_path(r'^level-select/?$', views.level_select, name='level-select'),\n    re_path(r'^leaderboard/?$', views.leaderboard, name='leaderboard'),\n    re_path(r'^play/?$', views.play, name='play'),\n    re_path(r'^play/submit/?$', views.play_submit, name='play_submit'),\n    re_path(r'^play/solution/?$', views.play_solution, name='play_solution'),\n    re_path(r'^project/?$', views.project, name='project'),\n]\n", "sub_path": "django-server/brachi/navigation/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 692, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.urls.re_path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "67043862", "text": "# -*- coding:utf-8 -*-\n\n# pylint: disable=E0611\n# pylint: disable=E1101\n# pylint: disable=W0703\n\"\"\"\ngolf.py\n\nBreakdown of % shot between four distances\nLONG = 250 - 200 yd\nMID = 200 - 150 yd\nSHORT = 150 - 100 yd\nSCORE = 100 - 0 yd\n\"\"\"\nimport csv\nimport codecs\nimport os\nimport sqlite3\nfrom sqlite3 import Error\n\nDATABASE = \"golf.db\"\nLABELS = [\"driver\",\n          \"fairway\",\n          \"four\",\n          \"five\",\n          
\"six\",\n \"seven\",\n \"eight\",\n \"nine\",\n \"pitch\",\n \"sand\",\n \"putt\"]\nCLUBS = list(map(lambda x:x.upper(), LABELS))\n\ndef blue(message):\n \"\"\"\n blue color\n\n :return: str\n \"\"\"\n return f\"\\x1b[34;1m{message}\\x1b[0m\"\n\n\ndef clubs():\n \"\"\"\n clubs\n\n TODO replace with csv\n\n :return: [[str]]\n \"\"\"\n data = []\n data.append(mk_str([\"driver\", 9, 45, 58.5, 250, \"HZRDUS Smoke RDX\", \"PXG 0811X\"]))\n data.append(mk_str([\"fairway\", 15, 43.5, 59, 225, \"HZRDUS Smoke\", \"Callaway Mavrik SZ\"]))\n data.append(mk_str([\"four\", 23, 38.5, 60.5, 200, \"ProjectX IO 6.0\", \"Apex Pro 21\"]))\n data.append(mk_str([\"five\", 26, 38, 61, 180, \"ProjectX IO 6.0\", \"Apex Pro 21\"]))\n data.append(mk_str([\"six\", 29, 37.5, 61.5, 170, \"ProjectX IO 6.0\", \"Apex Pro 21\"]))\n data.append(mk_str([\"seven\", 33, 37, 62, 160, \"ProjectX 6.0\", \"Apex Pro 21\"]))\n data.append(mk_str([\"eight\", 37, 36.5, 62.5, 150, \"ProjectX 6.0\", \"Apex Pro 21\"]))\n data.append(mk_str([\"nine\", 41, 36, 63, 140, \"ProjectX 6.0\", \"Apex Pro 21\"]))\n data.append(mk_str([\"pitch\", 48, 35.75, 63.5, 130, \"Wedge\", \"Titleist SM-8\"]))\n data.append(mk_str([\"sand\", 56, 35.5, 64, 100, \"Elevate\", \"PXG 0311\"]))\n data.append(mk_str([\"putt\", 3.5, 34, 70, 10, \"Putter\", \"Scotty Cameron Special Select #2\"]))\n return data\n\n\ndef colorize(connection, club):\n \"\"\"\n colorize\n\n :param connection: :class:`sqlite3`\n :param club: str\n :return: str\n \"\"\"\n spec = read_spec(connection, club)[0]\n loft = spec[2]\n if loft < 9:\n color = green(club)\n elif loft < 26:\n color = blue(club)\n elif loft < 37:\n color = red(club)\n elif loft < 56:\n color = yellow(club)\n else:\n color = green(club)\n return color\n\n\ndef create_db(db_file):\n \"\"\"\n create_db\n\n :param db_file: database file\n :return: :class:`sqlite3`\n \"\"\"\n conn = None\n os.chdir(\"/var/tmp\")\n if os.path.exists(db_file):\n os.unlink(db_file)\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as err:\n print(err)\n return conn\n\n\ndef create_task(connection, task):\n \"\"\"\n create_task\n\n :param connection: :class:`sqlite3`\n :param task: sql task\n \"\"\"\n try:\n connection.executescript(task)\n except Error as err:\n print(err)\n\n\ndef create_tbl(connection):\n \"\"\"\n create_tbl\n\n :param connection: :class:`sqlite3`\n \"\"\"\n schema = \"\"\"CREATE TABLE IF NOT EXISTS scores (\n id integer PRIMARY KEY autoincrement NOT NULL,\n event date,\n course text,\n score integer,\n driver integer,\n fairway integer,\n four integer,\n five integer,\n six integer,\n seven integer,\n eight integer,\n nine integer,\n pitch integer,\n sand integer,\n putt integer\n );\n CREATE TABLE IF NOT EXISTS bag (\n id integer PRIMARY KEY autoincrement NOT NULL,\n club text,\n loft integer,\n length float,\n lie float,\n yardage integer,\n shaft text,\n model text\n );\n \"\"\"\n create_task(connection, schema)\n\n\ndef csv_to_data(csv_file):\n \"\"\"\n csv_to_data\n\n :param csv_file: str\n :return: [[str]]\n \"\"\"\n data = []\n with open(csv_file, \"rb\") as stream:\n csv_open = csv.reader(codecs.iterdecode(stream, \"utf-8\"))\n for row in csv_open:\n new_row = []\n for item in row:\n new_row.append(item.strip())\n data.append(mk_score(new_row))\n return data\n\n\ndef green(message):\n \"\"\"\n green color\n\n :return: str\n \"\"\"\n return f\"\\x1b[32;1m{message}\\x1b[0m\"\n\n\ndef last_clause(amt=5):\n \"\"\"\n last_clause\n\n :param amt: int\n :return: str\n \"\"\"\n return f\"ORDER BY id 
DESC LIMIT {amt}\"\n\n\ndef mk_pct(amt=1, total=1):\n    \"\"\"\n    mk_pct\n\n    :param amt: int\n    :param total: int\n    :return: str\n    \"\"\"\n    return f\"{(amt/total)*100:.2f}\"\n\n\ndef mk_score(data):\n    \"\"\"\n    mk_score -- derives putts as the total score minus the per-club strokes\n\n    :param data: [str]\n    :return: [str]\n    \"\"\"\n    choice = []\n    for item in data[3:]:\n        choice.append(int(item))\n    putts = int(data[2]) - sum(choice)\n    data.append(putts)\n    return mk_str(data)\n\n\ndef mk_str(data):\n    \"\"\"\n    mk_str -- quote for sql\n\n    :param data: [str]\n    :return: [str]\n    \"\"\"\n    return [\"'\" + str(x).upper() + \"'\" for x in data]\n\n\ndef update_bag(connection, data):\n    \"\"\"\n    update_bag\n\n    :param connection: :class:`sqlite3`\n    :param data: [str]\n    \"\"\"\n    for row in data:\n        line = \",\".join(row)\n        sql = f\"\"\"INSERT INTO bag (\n            club,\n            loft,\n            length,\n            lie,\n            yardage,\n            shaft,\n            model\n            )\n            VALUES ({line});\n        \"\"\"\n        create_task(connection, sql)\n\n\ndef update_score(connection, data):\n    \"\"\"\n    update_score\n\n    :param connection: :class:`sqlite3`\n    :param data: [str]\n    \"\"\"\n    for row in data:\n        line = \",\".join(row)\n        sql = f\"\"\"INSERT INTO scores (\n            event,\n            course,\n            score,\n            driver,\n            fairway,\n            four,\n            five,\n            six,\n            seven,\n            eight,\n            nine,\n            pitch,\n            sand,\n            putt\n            )\n            VALUES ({line});\n        \"\"\"\n        create_task(connection, sql)\n\n\ndef read_avg(connection, data, amt):\n    \"\"\"\n    read_avg\n\n    :param connection: :class:`sqlite3`\n    :param data: str\n    :param amt: int\n    :return: int\n    \"\"\"\n    if amt > 1:\n        score = f\"SELECT AVG({data}) FROM scores {last_clause(amt)};\"\n    else:\n        score = f\"SELECT {data} FROM scores {last_clause(amt)};\"\n    cursor = connection.cursor()\n    cursor.execute(score)\n    count = cursor.fetchall()[0]\n    return count[0]\n\n\ndef read_sum(connection, data, amt):\n    \"\"\"\n    read_sum\n\n    :param connection: :class:`sqlite3`\n    :param data: str\n    :param amt: int\n    :return: int\n    \"\"\"\n    if amt > 1:\n        score = f\"SELECT SUM({data}) FROM scores {last_clause(amt)};\"\n    else:\n        score = f\"SELECT {data} FROM scores {last_clause(amt)};\"\n    cursor = connection.cursor()\n    cursor.execute(score)\n    count = cursor.fetchall()[0]\n    return count[0]\n\n\ndef read_spec(connection, data):\n    \"\"\"\n    read_spec\n\n    :param connection: :class:`sqlite3`\n    :param data: str\n    :return: [(str)]\n    \"\"\"\n    score = f\"select * from bag where club = '{data}';\"\n    cursor = connection.cursor()\n    cursor.execute(score)\n    return cursor.fetchall()\n\n\ndef show_bag(connection):\n    \"\"\"\n    show_bag\n\n    :param connection: :class:`sqlite3`\n    \"\"\"\n    for club in CLUBS:\n        row = read_spec(connection, club)[0]\n        print(row)\n\n\ndef show_histogram(connection, amt):\n    \"\"\"\n    show_histogram\n\n    :param connection: :class:`sqlite3`\n    :param amt: int\n    \"\"\"\n    hist = []\n    hist.append([white(\"CLUB\"), \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10+\"])\n    for label in CLUBS:\n        club = read_avg(connection, label, amt)\n        row = []\n        tick = 0\n        row.append(colorize(connection, label))\n        while tick < club:\n            if tick > 8:\n                row.append(f\"> ({club:.2f})\")\n                tick = club\n            else:\n                row.append(\"*\")\n            tick += 1\n        hist.append(row)\n    for item in hist:\n        ticks = \" \".join(item[1:])\n        print(f\"{item[0]:10s}\\t {ticks}\")\n\n\ndef show_long(connection, amt):\n    \"\"\"\n    show_long\n\n    :param connection: :class:`sqlite3`\n    :param amt: int\n    :return: int\n    \"\"\"\n    count = 0\n    for club in CLUBS[0:3]:\n        count += read_sum(connection, club, amt)\n    return count\n\n\n
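# Hedged sketch (example_long_pct is not part of the original module): the\n# distance-bucket helpers compose naturally -- a bucket's share of recent\n# strokes is its per-club total over the last amt rounds divided by the\n# total strokes, formatted by mk_pct.\ndef example_long_pct(connection, amt=5):\n    \"\"\"Illustrative only: share of recent strokes hit from the LONG bucket.\"\"\"\n    total = read_sum(connection, \"score\", amt)\n    return mk_pct(show_long(connection, amt), total)\n\n\n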
def show_mid(connection, amt):\n    \"\"\"\n    show_mid\n\n    :param connection: :class:`sqlite3`\n    :param amt: int\n    :return: int\n    \"\"\"\n    count = 0\n    for club in CLUBS[3:6]:\n        count += read_sum(connection, club, amt)\n    return count\n\n\ndef show_short(connection, amt):\n    \"\"\"\n    show_short\n\n    :param connection: :class:`sqlite3`\n    :param amt: int\n    :return: int\n    \"\"\"\n    count = 0\n    for club in CLUBS[6:9]:\n        count += read_sum(connection, club, amt)\n    return count\n\n\ndef show_score(connection, amt):\n    \"\"\"\n    show_score\n\n    :param connection: :class:`sqlite3`\n    :param amt: int\n    :return: int\n    \"\"\"\n    sand = read_sum(connection, \"sand\", amt)\n    putt = read_sum(connection, \"putt\", amt)\n    return sand + putt\n\n\ndef show_stats(connection, amt):\n    \"\"\"\n    show_stats\n\n    :param connection: :class:`sqlite3`\n    :param amt: int\n    \"\"\"\n    total = read_sum(connection, \"score\", amt)\n    _avg = read_avg(connection, \"score\", amt)\n    _long = blue(mk_pct(show_long(connection, amt), total))\n    _mid = red(mk_pct(show_mid(connection, amt), total))\n    _short = yellow(mk_pct(show_short(connection, amt), total))\n    _score = green(mk_pct(show_score(connection, amt), total))\n    print(f\"[{_long}% / {_mid}% / {_short}% / {_score}%] SCORE={_avg:.2f}\")\n\n\ndef red(message):\n    \"\"\"\n    red color\n\n    :return: str\n    \"\"\"\n    return f\"\\x1b[31;1m{message}\\x1b[0m\"\n\n\ndef run():\n    \"\"\"\n    run\n    \"\"\"\n    conn = create_db(DATABASE)\n    if conn is not None:\n        create_tbl(conn)\n        update_bag(conn, clubs())\n        update_score(conn, scores())\n        print(\"=== BAG ===\")\n        show_bag(conn)\n        print(\"=== AVERAGE ===\")\n        show_histogram(conn, amt=10)\n        show_stats(conn, amt=10)\n        print(\"=== LAST ===\")\n        show_histogram(conn, amt=1)\n        show_stats(conn, amt=1)\n\n\ndef scores():\n    \"\"\"\n    scores\n\n    :return: [[str]]\n    \"\"\"\n    return csv_to_data(\"/var/tmp/scores.csv\")\n\n\ndef yellow(message):\n    \"\"\"\n    yellow color\n\n    :return: str\n    \"\"\"\n    return f\"\\x1b[33;1m{message}\\x1b[0m\"\n\n\ndef white(message):\n    \"\"\"\n    white color\n\n    :return: str\n    \"\"\"\n    return f\"\\x1b[37;1m{message}\\x1b[0m\"\n\n\nif __name__ == \"__main__\":\n    run()\n", "sub_path": "examples/python/golf.py", "file_name": "golf.py", "file_ext": "py", "file_size_in_byte": 10265, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.chdir", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 100, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 102, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 104, "usage_type": "name"}, {"api_name": "sqlite3.Error", "line_number": 118, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 168, "usage_type": "call"}, {"api_name": "codecs.iterdecode", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "607796627", "text": "#!/usr/bin/env python\r\n# encoding: utf-8\r\n# File Name: data_util.py\r\n# Author: Jiezhong Qiu\r\n# Create Time: 2019/12/30 14:20\r\n# TODO:\r\n\r\nimport numpy as np\r\nimport scipy\r\nimport torch\r\nimport tensorflow as tf  # needed by plot_to_image below\r\nimport io\r\nimport scipy.sparse as sparse\r\nfrom scipy.sparse import linalg\r\nimport sklearn.preprocessing as preprocessing\r\nimport torch.nn.functional as F\r\nimport dgl\r\nimport matplotlib.pyplot as plt\r\nimport itertools\r\n\r\nclass Distance(object):\r\n    def __init__(self, p=2, emb_name=\"prone\"):\r\n        self.p = p # 
p-norm\r\n self.emb_name = emb_name\r\n def __call__(self, graph):\r\n def distance(edges):\r\n return {'dis': torch.norm(edges.src[self.emb_name]-edges.dst[self.emb_name], p=self.p, dim=1)}\r\n graph.apply_edges(func=distance,\r\n edges='__ALL__',\r\n inplace=True)\r\n return graph\r\n\r\ndef batcher():\r\n def batcher_dev(batch):\r\n graph_q, graph_k = zip(*batch)\r\n # print(len(graph_q), len(graph_k))\r\n graph_q, graph_k = dgl.batch(graph_q), dgl.batch(graph_k)\r\n # try:\r\n # # print(\"lala\", [sg.number_of_nodes() for sg in graph_q], [sg.number_of_nodes() for sg in graph_k])\r\n # graph_q, graph_k = dgl.batch(graph_q), dgl.batch(graph_k)\r\n # except:\r\n # print(\"lala\", [sg.number_of_nodes() for sg in graph_q], [sg.number_of_nodes() for sg in graph_k])\r\n # return None, None\r\n # return graph_q[0], graph_k[0]\r\n # print(type(graph_q), type(graph_k))\r\n # print(graph_q.dstdata['neigh'])\r\n # print(graph_q.ndata['pos_undirected'].size(), graph_k.ndata['pos_undirected'].size(), graph_q.number_of_nodes(), graph_k.number_of_nodes())\r\n return graph_q, graph_k\r\n return batcher_dev\r\n\r\ndef labeled_batcher():\r\n def batcher_dev(batch):\r\n graph_q, label = zip(*batch)\r\n graph_q = dgl.batch(graph_q)\r\n return graph_q, torch.LongTensor(label)\r\n return batcher_dev\r\n\r\nclass dynamic_batcher(object):\r\n def __init__(self, max_node_per_batch=128*32*2, max_edge_per_batch=20000):\r\n self.max_node_per_batch = max_node_per_batch\r\n self.max_edge_per_batch = max_edge_per_batch\r\n\r\n def __call__(self, batch):\r\n # TODO make it more elegant with itertools?\r\n # batch = sorted(batch, key=lambda graphs: graphs[0].number_of_edges() + graphs[1].number_of_edges())\r\n graph_q, graph_k = zip(*batch)\r\n accum_node, accum_edge = 0, 0\r\n for i in range(len(graph_q)):\r\n accum_node += graph_q[i].number_of_nodes() + graph_k[i].number_of_nodes()\r\n accum_edge += graph_q[i].number_of_edges() + graph_k[i].number_of_edges()\r\n if i > 1 and (accum_node > self.max_node_per_batch or accum_edge > self.max_edge_per_batch):\r\n graph_q = graph_q[:i]\r\n graph_k = graph_k[:i]\r\n break\r\n graph_q, graph_k = dgl.batch(graph_q), dgl.batch(graph_k)\r\n return graph_q, graph_k\r\n\r\nclass filter_batcher(object):\r\n def __init__(self, max_node=256, max_edge=2048):\r\n self.max_node = max_node\r\n self.max_edge = max_edge\r\n\r\n def __call__(self, batch):\r\n predicate = lambda graphs: \\\r\n graphs[0].number_of_nodes() > self.max_node or \\\r\n graphs[0].number_of_edges() > self.max_edge or \\\r\n graphs[1].number_of_nodes() > self.max_node or \\\r\n graphs[1].number_of_edges() > self.max_edge\r\n batch = itertools.filterfalse(predicate, batch)\r\n graph_q, graph_k = zip(*batch)\r\n graph_q, graph_k = dgl.batch(graph_q), dgl.batch(graph_k)\r\n return graph_q, graph_k\r\n\r\ndef plot_to_image(figure):\r\n\t\"\"\"Converts the matplotlib plot specified by 'figure' to a PNG image and\r\n\treturns it. 
The supplied figure is closed and inaccessible after this call.\"\"\"\r\n\t# Save the plot to a PNG in memory.\r\n\tbuf = io.BytesIO()\r\n\tplt.savefig(buf, format='png')\r\n\t# Closing the figure prevents it from being displayed directly inside\r\n\t# the notebook.\r\n\tplt.close(figure)\r\n\tbuf.seek(0)\r\n\t# Convert PNG buffer to TF image\r\n\timage = tf.image.decode_png(buf.getvalue(), channels=3)\r\n\t# Add the batch dimension\r\n\timage = tf.expand_dims(image, 0)\r\n\treturn torch.from_numpy(image.numpy())\r\n\r\ndef _edge_subgraph(trace, seed):\r\n mapping = dict()\r\n edge_list = set()\r\n mapping[seed] = 0\r\n for walk in trace:\r\n u = seed\r\n for v in walk.tolist():\r\n if (u, v) not in edge_list:\r\n if u not in mapping:\r\n mapping[u] = len(mapping)\r\n if v not in mapping:\r\n mapping[v] = len(mapping)\r\n edge_list.add((u, v))\r\n u = v\r\n subg = dgl.DGLGraph()\r\n subg.add_nodes(len(mapping))\r\n u_list, v_list = [], []\r\n for u, v in edge_list:\r\n u_list.append(mapping[u])\r\n v_list.append(mapping[v])\r\n subg.add_edges(u_list, v_list)\r\n return subg\r\n\r\ndef _rwr_trace_to_dgl_graph(g, seed, trace,\r\n positional_embedding_size=None, entire_graph=False, notCat=False,\r\n use_g=False):\r\n if not use_g:\r\n if not notCat:\r\n subv = torch.unique(torch.cat(trace)).tolist()\r\n else:\r\n subv = torch.unique(trace).tolist()\r\n try:\r\n subv.remove(seed)\r\n except ValueError:\r\n pass\r\n try:\r\n subv.remove(-1)\r\n except ValueError:\r\n pass\r\n subv = [seed] + subv\r\n if entire_graph:\r\n subg = g.subgraph(g.nodes())\r\n else:\r\n subg = g.subgraph(subv)\r\n else:\r\n subg = g.subgraph(g.nodes())\r\n # assert subg.parent_nid[0] == seed, \"by construction, node 0 in subgraph should be the seed\"\r\n\r\n if positional_embedding_size is not None:\r\n subg = _add_undirected_graph_positional_embedding(subg, positional_embedding_size)\r\n # subg = _add_undirected_graph_positional_embedding(subg, positional_embedding_size // 2)\r\n\r\n # mapping = dict([(v, k) for k, v in enumerate(subg.parent_nid.tolist())])\r\n # nfreq = torch.zeros(subg.number_of_nodes(), dtype=torch.long)\r\n # efreq = torch.zeros(subg.number_of_edges(), dtype=torch.long)\r\n\r\n # M = np.zeros(\r\n # shape=(subg.number_of_nodes(), subg.number_of_nodes()),\r\n # dtype=np.float32\r\n # )\r\n # for walk in trace:\r\n # u = mapping[seed]\r\n # nfreq[u] += 1\r\n # for v in walk.tolist():\r\n # v = mapping[v]\r\n # nfreq[v] += 1\r\n # # add edge feature for (u, v)\r\n # eid = subg.edge_id(u, v)\r\n # efreq[eid] += 1\r\n # M[u, v] += 1\r\n # u = v\r\n\r\n # subg = _add_directed_graph_positional_embedding(subg, M, positional_embedding_size // 2)\r\n\r\n # subg.ndata['nfreq'] = nfreq\r\n # subg.edata['efreq'] = efreq\r\n\r\n subg.ndata['seed'] = torch.zeros(subg.number_of_nodes(), dtype=torch.long)\r\n if entire_graph:\r\n subg.ndata['seed'][seed] = 1\r\n else:\r\n subg.ndata['seed'][0] = 1\r\n return subg\r\n\r\ndef eigen_decomposision(n, k, laplacian, hidden_size, retry):\r\n if k <= 0:\r\n return torch.zeros(n, hidden_size)\r\n laplacian = laplacian.astype('float64')\r\n ncv=min(n, max(2*k + 1, 20))\r\n # follows https://stackoverflow.com/questions/52386942/scipy-sparse-linalg-eigsh-with-fixed-seed\r\n v0 = np.random.rand(n).astype('float64')\r\n for i in range(retry):\r\n try:\r\n s, u = linalg.eigsh(\r\n laplacian,\r\n k=k,\r\n which='LA',\r\n ncv=ncv,\r\n v0=v0)\r\n except sparse.linalg.eigen.arpack.ArpackError:\r\n print(\"arpack error, retry=\", i)\r\n ncv = min(ncv*2, n)\r\n if i + 1 == retry:\r\n 
sparse.save_npz('arpack_error_sparse_matrix.npz', laplacian)\r\n u = torch.zeros(n, k)\r\n else:\r\n break\r\n x = preprocessing.normalize(u, norm='l2')\r\n x = torch.from_numpy(x.astype('float32'))\r\n x = F.pad(x, (0, hidden_size-k), 'constant', 0)\r\n return x\r\n\r\n\r\ndef _add_directed_graph_positional_embedding(g, M, hidden_size, retry=10, alpha=0.95):\r\n # Follow https://networkx.github.io/documentation/networkx-1.9/reference/generated/networkx.linalg.laplacianmatrix.directed_laplacian_matrix.html#directed-laplacian-matrix\r\n # We use its pagerank mode\r\n n = g.number_of_nodes()\r\n # add constant to dangling nodes' row\r\n dangling = scipy.where(M.sum(axis=1) == 0)\r\n for d in dangling[0]:\r\n M[d] = 1.0 / n\r\n # normalize\r\n M = M / M.sum(axis=1)\r\n P = alpha * M + (1 - alpha) / n\r\n if n == 2:\r\n evals, evecs = np.linalg.eig(P.T)\r\n evals = evals.flatten().real\r\n evecs = evecs[:, 0] if evals[0] > evals[1] else evecs[:, 1]\r\n else:\r\n evals, evecs = sparse.linalg.eigs(P.T, k=1, ncv=n)\r\n v = evecs.flatten().real\r\n p = v / v.sum()\r\n sqrtp = scipy.sqrt(p)\r\n Q = sparse.spdiags(sqrtp, [0], n, n) * P * sparse.spdiags(1.0/sqrtp, [0], n, n)\r\n # I = scipy.identity(n)\r\n # laplacian = I - (Q + Q.T)/2.0\r\n laplacian = (Q + Q.T)/2.0\r\n k=min(n-2, hidden_size)\r\n x = eigen_decomposision(n, k, laplacian, hidden_size, retry)\r\n g.ndata['pos_directed'] = x.float()\r\n return g\r\n\r\ndef _add_undirected_graph_positional_embedding(g, hidden_size, retry=10):\r\n # We use eigenvectors of normalized graph laplacian as vertex features.\r\n # It could be viewed as a generalization of positional embedding in the\r\n # attention is all you need paper.\r\n # Recall that the eignvectors of normalized laplacian of a line graph are cos/sin functions.\r\n # See section 2.4 of http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf\r\n n = g.number_of_nodes()\r\n # adj = g.adjacency_matrix_scipy(transpose=False, return_edge_ids=False).astype(float)\r\n adj = g.adjacency_matrix(transpose=False, scipy_fmt=\"csr\").astype(float)\r\n norm = sparse.diags(\r\n dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5,\r\n dtype=float)\r\n laplacian = norm * adj * norm\r\n k=min(n-2, hidden_size)\r\n x = eigen_decomposision(n, k, laplacian, hidden_size, retry)\r\n g.ndata['pos_undirected'] = x.float()\r\n return g\r\n\r\n", "sub_path": "graph_self_learn/data_util.py", "file_name": "data_util.py", "file_ext": "py", "file_size_in_byte": 10337, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "torch.norm", "line_number": 27, "usage_type": "call"}, {"api_name": "dgl.batch", "line_number": 37, "usage_type": "call"}, {"api_name": "dgl.batch", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 55, "usage_type": "call"}, {"api_name": "dgl.batch", "line_number": 75, "usage_type": "call"}, {"api_name": "itertools.filterfalse", "line_number": 89, "usage_type": "call"}, {"api_name": "dgl.batch", "line_number": 91, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 108, "usage_type": 
"call"}, {"api_name": "dgl.DGLGraph", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.unique", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.unique", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 187, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 200, "usage_type": "attribute"}, {"api_name": "scipy.sparse.linalg.eigsh", "line_number": 203, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg", "line_number": 203, "usage_type": "name"}, {"api_name": "scipy.sparse.linalg", "line_number": 209, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 209, "usage_type": "name"}, {"api_name": "scipy.sparse.save_npz", "line_number": 213, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 213, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 214, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 217, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 217, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.nn.functional.pad", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 219, "usage_type": "name"}, {"api_name": "scipy.where", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 235, "usage_type": "attribute"}, {"api_name": "scipy.sparse.linalg.eigs", "line_number": 239, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg", "line_number": 239, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 239, "usage_type": "name"}, {"api_name": "scipy.sqrt", "line_number": 242, "usage_type": "call"}, {"api_name": "scipy.sparse.spdiags", "line_number": 243, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 243, "usage_type": "name"}, {"api_name": "scipy.sparse.diags", "line_number": 261, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 261, "usage_type": "name"}, {"api_name": "dgl.backend.asnumpy", "line_number": 262, "usage_type": "call"}, {"api_name": "dgl.backend", "line_number": 262, "usage_type": "attribute"}]} +{"seq_id": "475972060", "text": "#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: sftp_chg_dir.py\n\n Description: Unit testing of SFTP.chg_dir in sftp_class.py.\n\n Usage:\n test/unit/sftp_class/sftp_chg_dir.py\n\n Arguments:\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport sys\nimport os\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\n# Third-party\nimport mock\n\n# Local\nsys.path.append(os.getcwd())\nimport sftp_class\nimport version\n\n__version__ = version.__version__\n\n\nclass SSHClient(object):\n\n \"\"\"Class: SSHClient\n\n Description: Class stub holder for paramiko.SSHClient class.\n\n Methods:\n __init__ -> Class initialization.\n open_sftp -> open_sftp method.\n chdir -> chdir method.\n\n \"\"\"\n\n def __init__(self):\n\n \"\"\"Method: __init__\n\n Description: Class initialization.\n\n Arguments:\n\n \"\"\"\n\n self.chg_dir = None\n\n def 
open_sftp(self):\n\n \"\"\"Method: open_sftp\n\n Description: open_sftp method.\n\n Arguments:\n\n \"\"\"\n\n return self.chdir\n\n def chdir(self, chg_dir):\n\n \"\"\"Method: chdir\n\n Description: chdir method.\n\n Arguments:\n\n \"\"\"\n\n self.chg_dir = chg_dir\n\n\nclass UnitTest(unittest.TestCase):\n\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp -> Unit testing initilization.\n test_chg_dir_good -> Test with changing directory.\n tearDown -> Clean up of unit testing.\n\n \"\"\"\n\n def setUp(self):\n\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n\n class CfgTest(object):\n\n \"\"\"Class: CfgTest\n\n Description: Class which is a representation of a cfg module.\n\n Methods:\n __init__ -> Initialize configuration environment.\n\n \"\"\"\n\n def __init__(self):\n\n \"\"\"Method: __init__\n\n Description: Initialization instance of the CfgTest class.\n\n Arguments:\n\n \"\"\"\n\n self.username = \"username\"\n self.password = None\n self.host = \"hostname\"\n self.port = 22\n self.log_file = \"./test/unit/sftp_class/tmp/paramiko.log\"\n\n self.cfg = CfgTest()\n self.sshclient = SSHClient()\n self.cfg_file = \"Config_File\"\n self.cfg_dir = \"Config_Dir\"\n self.chg_dir = \"/dir/path\"\n\n @mock.patch(\"sftp_class.paramiko.SSHClient.open_sftp\")\n @mock.patch(\"sftp_class.gen_libs.load_module\")\n def test_chg_dir_good(self, mock_cfg, mock_sftp):\n\n \"\"\"Function: test_chg_dir_good\n\n Description: Test with changing directory.\n\n Arguments:\n\n \"\"\"\n\n mock_cfg.return_value = self.cfg\n mock_sftp.return_value = True\n\n sftp = sftp_class.SFTP(self.cfg_file, self.cfg_dir)\n sftp.sftp = self.sshclient\n sftp.is_connected = True\n\n self.assertTrue(sftp.chg_dir(self.chg_dir))\n\n self.assertEqual(\n (sftp.username, sftp.log_file, sftp.is_connected),\n (self.cfg.username, self.cfg.log_file, True))\n\n def tearDown(self):\n\n \"\"\"Function: tearDown\n\n Description: Clean up of unit testing.\n\n Arguments:\n\n \"\"\"\n\n if os.path.isfile(self.cfg.log_file):\n os.remove(self.cfg.log_file)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "sub_path": "test/unit/sftp_class/sftp_chg_dir.py", "file_name": "sftp_chg_dir.py", "file_ext": "py", "file_size_in_byte": 3504, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.version_info", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 30, "usage_type": "call"}, {"api_name": "version.__version__", "line_number": 34, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 87, "usage_type": "attribute"}, {"api_name": "sftp_class.SFTP", "line_number": 158, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 143, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 179, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 183, "usage_type": "call"}]} +{"seq_id": "141754233", "text": "import warnings\nwarnings.simplefilter(\"ignore\")\nimport pandas as pd\nfrom matplotlib import pyplot as 
plt\nimport matplotlib.dates as mdates\nfrom multiprocessing import Pool \nimport glob\nimport pathlib\nfrom itertools import repeat\nimport projfuncs as pf\nimport os\nimport sys\nimport subprocess\nfrom sklearn.ensemble import IsolationForest\n\ndef trick(i,df, k, gamma, option):\n if i < int((k)/2):\n try:\n moving = df.iloc[:k].copy()\n midpoints = df[option].iloc[i]\n moving = moving.drop(moving.iloc[i].name)\n moving_average, std = moving[option].describe()[1], moving[option].describe()[2]\n strength = abs(midpoints - moving_average)-3*std - gamma\n if strength > 0:\n return [1,strength]\n else:\n return [0,strength]\n except IndexError:\n return [0,0]\n elif i > df.shape[0]-1-int((k)/2):\n try:\n moving = df.iloc[df.shape[0]-1-k:].copy()\n midpoints = df[option].iloc[i]\n moving = moving.drop(moving.iloc[i].name)\n moving_average, std = moving[option].describe()[1], moving[option].describe()[2]\n strength = abs(midpoints - moving_average)-3*std - gamma\n if strength > 0:\n return [1,strength]\n else:\n return [0,strength]\n except IndexError:\n return [0,0]\n else:\n try:\n moving = df.iloc[i-int((k)/2):i+int((k)/2)].copy()\n midpoints = df[option].iloc[i]\n moving = moving.drop(moving.iloc[int((k)/2)].name)\n moving_average, std = moving[option].describe()[1], moving[option].describe()[2]\n strength = abs(midpoints - moving_average)-3*std - gamma\n if strength > 0:\n return [1,strength]\n else:\n return [0,strength]\n except IndexError:\n return [0,0]\n\n\ndef k_gamma(df, k, gamma ,option):\n num = len(df)\n outliers = []\n strength = []\n if num <= k:\n return False\n with Pool(os.cpu_count()-4) as p:\n dfs = p.starmap(trick, zip(range(num),repeat(df),repeat(k),repeat(gamma), repeat(option)))\n for i in dfs:\n outliers.append(i[0])\n strength.append(i[1])\n df[\"Outliers\"] = outliers\n df[\"Strength\"] = strength\n return df\n\nif __name__ == \"__main__\":\n try:\n filetype = sys.argv[1]\n date = sys.argv[2]\n ticker = sys.argv[3]\n option1 = sys.argv[4]\n venue = sys.argv[5]\n method = int(sys.argv[6])\n if (option1 in [\"Trade\", \"Bid\", \"Ask\"]):\n if (option1 == \"Trade\"):\n option2 = \"T\"\n else:\n option2 = \"Q\" \n else:\n print(\"Invalid Input\")\n option = option1 + \" Price\"\n if (filetype == \"A\"):\n fileloc = \"/space/data/new/PLUSTICK_1619_\" + date + \".txt\"\n if (filetype == \"B\"):\n fileloc = \"/space/data/new/PLUSTICK_FI_1356_\" + date + \".txt\"\n if (filetype == \"C\"):\n fileloc = \"/space/data/new/PLUSTICK_FUTURES_666_\" + date + \".txt\"\n if (filetype == \"D\"):\n fileloc = \"/space/data/new/PLUSTICK_FUTURES_680_\" + date + \".txt\"\n \n df = pf.get_tickerData(fileloc, filetype, ticker, option2)\n \n except IndexError:\n print(\"Type 'list' to get a list of valid inputs\")\n\n filetype = pf.get_validInput(\"Type A or B or C or D files: \", 4)\n date = pf.get_validInput(\"Enter Date in yyyymmdd: \", 0,\n filetype=filetype)\n ticker = pf.get_validInput(\"Enter One Ticker: \", 1)\n ticker = ticker[0]\n while True:\n option1 = input(\"Enter Trade, Bid, or Ask: \")\n if (option1 in [\"Trade\", \"Bid\", \"Ask\"]):\n if (option1 == \"Trade\"):\n option2 = \"T\"\n else:\n option2 = \"Q\" \n break\n else:\n print(\"Invalid Input\") \n if (filetype == \"A\"):\n fileloc = \"/space/data/new/PLUSTICK_1619_\" + date + \".txt\"\n CI = \"Contributor Id\"\n if (filetype == \"B\"):\n fileloc = \"/space/data/new/PLUSTICK_FI_1356_\" + date + \".txt\"\n CI = \"Contributor Id\"\n if (filetype == \"C\"):\n fileloc = \"/space/data/new/PLUSTICK_FUTURES_666_\" + date + 
\".txt\"\n CI = \"Part Code\"\n if (filetype == \"D\"):\n fileloc = \"/space/data/new/PLUSTICK_FUTURES_680_\" + date + \".txt\"\n CI = \"Part Code\"\n\n df = pf.get_tickerData(fileloc, filetype, ticker, option2)\n option = option1 + \" Price\"\n\n df[CI] = df[CI].fillna('unknown')\n venuelist = df[CI].unique() \n\n \n while True:\n method = 1\n if method in range(1, 4):\n break\n\n df = df[[\"Time\", option]]\n df[\"Outliers\"] = 0\n if (df.empty):\n print(\"Not enough data, quiting\")\n\n \n elif method == 1:\n method_name = \"K_gamma\"\n k, gamma = input(\"Please input k, gamma: \").split()\n parameters = k + '_' + gamma\n k, gamma = int(k), float(gamma) \n df = k_gamma(df, k, gamma ,option)\n\n\n# if (method != 3):\n # Directory stuff\n os.chdir(str(pathlib.Path.home()) + \"/workspace/\")\n dirpath = \"/\".join([\"/space/common/workspace/phase2_output/program9_out\", filetype, date, ticker, option1, method_name]) + \"/\"\n pathlib.Path(dirpath).mkdir(parents=True, exist_ok=True)\n \n df[\"Time\"] = pd.to_datetime(df[\"Time\"],errors = 'ignore')\n df1 = df[df[\"Outliers\"] == 1]\n clean = df[df[\"Outliers\"] == 0]\n message = \"\\n\".join([\"Method: \" + method_name,\n \"Parameters: \" + parameters, \n \"Number of outliers: \" + str(df1.shape[0]), \n \"Total number of observations: \" + str(df.shape[0]), \n \"Percentage outliers: \" + str(df1.shape[0] / df.shape[0]),\n \"Cleaned price range:\" + str(min(clean[option])) + \"-\" + str(max(clean[option])),\n \"Raw price range:\" + str(min(df[option])) + '-' + str(max(clean[option]))])\n print(message)\n fname = dirpath + ticker + '_aggregate_' + parameters\n with open(fname + \"_statistics.txt\", 'w+') as f:\n f.write(message) \n\n fig,ax = plt.subplots(figsize = (16,9))\n ax.plot(df[\"Time\"],df[option], label = \"Raw price\", color = '#dcdcdc')\n ax.plot(clean[\"Time\"],clean[option], label = \"Cleaned price\")\n ax.plot(df1[\"Time\"], df1[option],'ro', label = \"Outliers\", color = 'red')\n ax.set_xlabel(\"Time\")\n ax.set_ylabel(\"Price\")\n ax.set_title(\" | \".join([ticker, option, date, method_name]))\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n #setting major locator\n alldays = mdates.HourLocator(interval = 3)# 3H interval\n ax.xaxis.set_major_locator(alldays)\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%I %p'))\n ax.legend()\n fig.savefig(fname + \".png\")\n df1.drop(columns=[\"Outliers\"], inplace=True)\n df1.to_csv(fname + \".csv\")\n", "sub_path": "phase_2/problem9.py", "file_name": "problem9.py", "file_ext": "py", "file_size_in_byte": 7410, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "warnings.simplefilter", "line_number": 2, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 64, "usage_type": "call"}, {"api_name": "os.cpu_count", "line_number": 64, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 77, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 79, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 80, "usage_type": "attribute"}, {"api_name": "projfuncs.get_tickerData", "line_number": 98, "usage_type": "call"}, {"api_name": 
"projfuncs.get_validInput", "line_number": 103, "usage_type": "call"}, {"api_name": "projfuncs.get_validInput", "line_number": 104, "usage_type": "call"}, {"api_name": "projfuncs.get_validInput", "line_number": 106, "usage_type": "call"}, {"api_name": "projfuncs.get_tickerData", "line_number": 131, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 159, "usage_type": "call"}, {"api_name": "pathlib.Path.home", "line_number": 159, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 161, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.dates.HourLocator", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 190, "usage_type": "name"}]} +{"seq_id": "150231367", "text": "import sys, logging, os\nsys.path.append(\"../../src\")\nos.environ[\"HULU_ENV\"] = \"test\"\n\nimport unittest, db\nimport mock\n\nclass TestDatabase(unittest.TestCase):\n def setUp(self):\n logging.basicConfig(level=logging.INFO)\n self.redis = mock.Mock()\n self.client = db.Database(self.redis)\n\n def test_incr_vote_calls_redis_once(self):\n self.client.incrVote(\"test_key\")\n self.redis.incr.assert_called_with(\"test_key\")\n\n def test_when_redis_throws_exceptions_we_retry(self):\n self.redis.side_effect = [Exception(\"Redis Failed\"), \"test_value\"]\n self.client.getVote(\"test_key\")\n\n def test_when_redis_timesout_too_often_we_raise_exception(self):\n self.redis.side_effect = Exception(\"Redis Failed\")\n self.client.getVote(\"test_key\")\n\nif __name__ == \"__main__\":\n unittest.main()\n", "sub_path": "test/unit/test_db.py", "file_name": "test_db.py", "file_ext": "py", "file_size_in_byte": 841, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 10, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 11, "usage_type": "call"}, {"api_name": "db.Database", "line_number": 12, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "593363013", "text": "#word counter sys[1] = file dir, sys[2] = 1(all), 2(top 10), 3(number of type), other(all)\n\nimport sys\nfrom collections import Counter\n\ndef word_count(f_name, mode = 1):\n word_list = Counter()\n with open(f_name) as f:\n for line in f:\n word_list += Counter(line.split())\n if mode == 1:\n for word, n in word_list.most_common():\n print(word, n)\n elif mode == 2:\n for word, n in word_list.most_common(10):\n print(word, n)\n elif mode == 3:\n print('異なり数:{}'.format(len(word_list)))\n\n\n\nif __name__ == '__main__':\n input_file = sys.argv[1]\n try:\n mode = int(sys.argv[2])\n if mode not in 
if __name__ == '__main__':\n    input_file = sys.argv[1]\n    try:\n        mode = int(sys.argv[2])\n        if mode not in [1,2,3]:\n            mode = 1\n    except (IndexError, ValueError):\n        mode = 1\n    word_count(input_file, mode)\n\n", "sub_path": "kurosawa/tutorial00/word-count.py", "file_name": "word-count.py", "file_ext": "py", "file_size_in_byte": 778, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "collections.Counter", "line_number": 7, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}]} +{"seq_id": "460305244", "text": "import visdom\nimport functools\nfrom anyfig import cfg\nfrom .utils import plotly_plots as plts\nimport plotly.graph_objects as go\nimport torch\nimport numpy as np\n\n\ndef clear_old_data(vis):\n  [vis.close(env=env) for env in vis.get_env_list()] # Kills windows\n  # [vis.delete_env(env) for env in vis.get_env_list()] # Kills envs\n\n\ndef log_if_active(func):\n  ''' Decorator which only calls logging function if logger is active '''\n  @functools.wraps(func)\n  def wrapper(self, *args, **kwargs):\n    if cfg().misc.log_data:\n      return func(self, *args, **kwargs)\n\n  return wrapper\n\n\nclass Logger():\n  def __init__(self):\n    if cfg().misc.log_data:\n      try:\n        self.vis = visdom.Visdom()\n        clear_old_data(self.vis)\n      except Exception as e:\n        err_msg = \"Couldn't connect to Visdom. Make sure to have a Visdom server running or turn off logging in the config\"\n        raise ConnectionError(err_msg) from e\n\n  @log_if_active\n  def log_image(self, image):\n    self.vis.image(image)\n\n  @log_if_active\n  def log_accuracy(self, accuracy, step, name='train'):\n    title = f'{name} Accuracy'.title()\n    plot = plts.accuracy_plot(self.vis.line, title)\n    plot(X=[step], Y=[accuracy])\n\n  @log_if_active\n  def log_parameters(self, text):\n    self.vis.text(text.replace('\\n', '<br>'))
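  # Hedged usage sketch (assumes a running Visdom server and that\n  # cfg().misc.log_data is True in the anyfig config):\n  #   logger = Logger()\n  #   logger.log_accuracy(0.93, step=100, name='val')\n  #   logger.log_image(chw_image_tensor)  # chw_image_tensor is hypothetical\n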
\n\n  @log_if_active\n  def log_gradients(self, model, layer_name):\n    layers = []\n    ave_grads = []\n    max_grads = []\n\n    for name, param in model.named_parameters():\n      if param.grad is None or 'bias' in name or layer_name not in name:\n        continue\n\n      layers.append(name)\n      ave_grads.append(param.grad.abs().mean().item())\n      max_grads.append(param.grad.abs().max().item())\n\n    Y = np.array([ave_grads, max_grads]).T\n    title = f'gradients {layer_name}'.title()\n    opts = dict(title=title)\n    self.vis.line(Y=Y, win=title, opts=opts)\n", "sub_path": "src/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 1847, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "anyfig.cfg", "line_number": 19, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 17, "usage_type": "call"}, {"api_name": "anyfig.cfg", "line_number": 27, "usage_type": "call"}, {"api_name": "visdom.Visdom", "line_number": 29, "usage_type": "call"}, {"api_name": "utils.plotly_plots.accuracy_plot", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.plotly_plots", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "6628464", "text": "import json\nimport os\nimport shutil\nimport argparse\n\nimport requests\n\ntemp_dir = \"temp_dl\"\n\ndef main(source, query_string, output_file):\n\n    url = source + \"/seg-71-v1-a1.ts?\" + query_string\n\n    response = requests.get(url)\n\n    if response.status_code != 200:\n        print(\"Query strings like Signature and Policy might have expired OR you don't have access to video\")\n        exit()\n\n    if not os.path.exists(temp_dir):\n        os.makedirs(temp_dir)\n\n    count = 1\n    fileList = []\n    print(\"Downloading video segments\")\n    while response.status_code == 200:\n        curr_seg = f\"/seg-{count}-v1-a1.ts?\"\n        response = requests.get(source + curr_seg + query_string)\n        with open(temp_dir + curr_seg, 'wb') as f:\n            f.write(response.content)\n        fileList.append(temp_dir + curr_seg)\n        count += 1\n\n    if os.path.getsize(fileList[-1]) < 1000:\n        os.remove(fileList[-1])\n        fileList = fileList[:-1]\n\n    print(\"stitching video segments\")\n    with open(output_file, 'wb') as stitched:\n        for filename in fileList:\n            with open(os.path.join(\"\", filename), 'rb') as part:\n                shutil.copyfileobj(part, stitched)\n\n    # clean up downloaded files\n    for filename in fileList:\n        os.remove(filename)\n    if len(os.listdir(temp_dir)) == 0:\n        os.removedirs(temp_dir)\n\n    print(\"Done - all finished\")\n\n\nif __name__ == \"__main__\":\n    \n    parser = argparse.ArgumentParser(description='Download and stitch private kaltura videos')\n    parser.add_argument('-src', action='store', dest='src', default=None, help='URL ending in .../a.mp4')\n    parser.add_argument('-qstr', action='store', dest='qstr', default=None, help=\"everything after the '.../seg-xx-v1-a1.ts?'\")\n    parser.add_argument('-output', action='store', dest='output', default=None, help='Output file name')\n\n    args = parser.parse_args()\n    main(args.src, args.qstr, args.output)\n\n\n\n", "sub_path": "fetch_videos.py", "file_name": "fetch_videos.py", "file_ext": "py", "file_size_in_byte": 1920, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": 
"os.makedirs", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "shutil.copyfileobj", "line_number": 42, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 46, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 47, "usage_type": "call"}, {"api_name": "os.removedirs", "line_number": 48, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "11447712", "text": "#\n# Imports\n#\n\nimport os\nimport cgi\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext.webapp.util import run_wsgi_app\n\n\n#\n# Datastore\n#\n\n#class Page(db.Model):\n# cookieval = db.IntegerProperty\n\n\n#\n# Request handlers\n#\n\nclass CountryTopPage(webapp.RequestHandler):\n def get(self):\n wikipedia_name = self.request.get('name')\n google_name = wikipedia_name\n\n #\n # Render page using django template language, and control parameters from above\n #\n\n template_values = { \n 'wikipedia_name' : wikipedia_name,\n 'google_name' : google_name\n }\n\n path = os.path.join(os.path.dirname(__file__), 'templates/country_top.html')\n self.response.out.write(template.render(path, template_values))\n\n\nclass CountryPage(webapp.RequestHandler):\n def get(self):\n wikipedia_country_name = self.request.get('name')\n html = \"\"\"\n \n \n View country : %(name)s\n \n \n \n \n \n \n \n \n \n\n\"\"\" % {'name':wikipedia_country_name}\n self.response.out.write(html)\n\n\n##\n## Url switching table\n##\n\napplication = webapp.WSGIApplication([\n ('/country', CountryPage),\n ('/country_top', CountryTopPage),\n ], debug=True)\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "country.py", "file_name": "country.py", "file_ext": "py", "file_size_in_byte": 2040, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 27, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 27, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 41, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 42, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 42, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 45, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 45, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.WSGIApplication", "line_number": 70, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp", "line_number": 70, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.run_wsgi_app", "line_number": 76, "usage_type": 
"call"}]} +{"seq_id": "229615557", "text": "import sexpdata\n\n\ndef normalise_tree(tree):\n\n def _normalise_tree(tree, wrap=True):\n from tree import Tree\n\n if type(tree) is list and len(tree) > 1:\n fun, *params = tree\n assert len(params)\n result = [fun.value(), *(map(_normalise_tree, params))]\n elif type(tree) is list:\n result = float(tree[0])\n else:\n result = tree\n\n if wrap:\n result = Tree(result)\n return result\n\n return _normalise_tree(tree, wrap=False)\n\n\ndef parse(expression):\n return normalise_tree(sexpdata.loads(expression))\n", "sub_path": "niso-bxk561/src/lab/parsing.py", "file_name": "parsing.py", "file_ext": "py", "file_size_in_byte": 606, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "tree.Tree", "line_number": 19, "usage_type": "call"}, {"api_name": "sexpdata.loads", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "594413916", "text": "import threading\r\nimport time\r\n\r\nimport usb.core\r\nimport usb.util\r\n\r\nfrom ThreadConstants import *\r\n\r\nSTATUS_BAD_STATE = 204\r\nSTATUS_OK = 206\r\nSTATUS_PACKET_REJECTED = 207\r\nSTATUS_FINISH = 236\r\nSTATUS_BUSY = 238\r\nSTATUS_POWER = 239\r\n\r\nSTATUS_NO_DEVICE = -1\r\n\r\n\r\ndef get_code_string_from_code(code):\r\n if code == STATUS_OK:\r\n return \"OK\"\r\n elif code == STATUS_BUSY:\r\n return \"Busy\"\r\n elif code == STATUS_PACKET_REJECTED:\r\n return \"Rejected\"\r\n elif code == STATUS_FINISH:\r\n return \"Finish\"\r\n elif code == STATUS_POWER:\r\n return \"Low Power\"\r\n elif code == STATUS_BAD_STATE:\r\n return \"Bad State\"\r\n else:\r\n return \"Unknown: \" + str(code)\r\n\r\n\r\ndef convert_to_list_bytes(data):\r\n if isinstance(data, str): # python 2\r\n packet = [0] * 30\r\n for i in range(0, 30):\r\n packet[i] = ord(data[i])\r\n return packet\r\n else:\r\n packet = [0] * 30\r\n for i in range(0, 30):\r\n packet[i] = data[i]\r\n return packet\r\n\r\n\r\ncrc_table = [\r\n 0x00, 0x5E, 0xBC, 0xE2, 0x61, 0x3F, 0xDD, 0x83,\r\n 0xC2, 0x9C, 0x7E, 0x20, 0xA3, 0xFD, 0x1F, 0x41,\r\n 0x00, 0x9D, 0x23, 0xBE, 0x46, 0xDB, 0x65, 0xF8,\r\n 0x8C, 0x11, 0xAF, 0x32, 0xCA, 0x57, 0xE9, 0x74]\r\n\r\n\r\ndef onewire_crc_lookup(line):\r\n \"\"\"\r\n License: 2-clause \"simplified\" BSD license\r\n Copyright (C) 1992-2017 Arjen Lentz\r\n https://lentz.com.au/blog/calculating-crc-with-a-tiny-32-entry-lookup-table\r\n\r\n :param line: line to be CRC'd\r\n :return: 8 bit crc of line.\r\n \"\"\"\r\n crc = 0\r\n for i in range(0, 30):\r\n crc = line[i] ^ crc\r\n crc = crc_table[crc & 0x0f] ^ crc_table[16 + ((crc >> 4) & 0x0f)]\r\n return crc\r\n\r\n\r\nclass ControllerQueueThread(threading.Thread):\r\n def __init__(self, controller):\r\n threading.Thread.__init__(self)\r\n self.controller = controller\r\n self.state = THREAD_STATE_UNSTARTED\r\n self.controller.listener(\"control_thread\", self.state)\r\n\r\n def run(self):\r\n self.state = THREAD_STATE_STARTED\r\n self.controller.listener(\"control_thread\", self.state)\r\n waited = 0\r\n self.controller.listener(\"status_bar\", (\"Laser On!\", 1))\r\n while self.state != THREAD_STATE_ABORT:\r\n if self.controller.process_queue():\r\n time.sleep(0.1)\r\n waited += 1\r\n if waited >= 20:\r\n break\r\n else:\r\n waited = 0\r\n while self.state == THREAD_STATE_PAUSED:\r\n self.controller.listener(\"control_thread\", self.state)\r\n time.sleep(1)\r\n self.controller.listener(\"status_bar\", (None, 1))\r\n if self.state == THREAD_STATE_ABORT:\r\n self.controller.listener(\"control_thread\", 
self.state)\r\n return\r\n self.state = THREAD_STATE_FINISHED\r\n self.controller.listener(\"control_thread\", self.state)\r\n\r\n\r\nclass UsbConnectThread(threading.Thread):\r\n def __init__(self, controller):\r\n threading.Thread.__init__(self)\r\n self.controller = controller\r\n\r\n def run(self):\r\n try:\r\n self.controller.open()\r\n except usb.core.USBError:\r\n pass\r\n\r\n\r\nclass UsbDisconnectThread(threading.Thread):\r\n def __init__(self, controller):\r\n threading.Thread.__init__(self)\r\n self.controller = controller\r\n\r\n def run(self):\r\n try:\r\n self.controller.close()\r\n except usb.core.USBError:\r\n pass\r\n\r\n\r\nclass K40Controller:\r\n def __init__(self, listener, usb_index=-1, usb_address=-1, usb_bus=-1, mock=False):\r\n self.listener = listener\r\n self.usb_index = usb_index\r\n self.usb_bus = usb_bus\r\n self.usb_address = usb_address\r\n self.mock = mock\r\n\r\n self.thread = ControllerQueueThread(self)\r\n self.listener(\"control_thread\", self.thread.state)\r\n\r\n self.status = None\r\n\r\n self.usb = None\r\n self.interface = None\r\n self.detached = False\r\n\r\n self.device_log = \"\"\r\n\r\n self.process_queue_pause = False\r\n self.autostart = True\r\n\r\n self.buffer = b''\r\n self.add_queue = b''\r\n self.listener(\"buffer\", 0)\r\n self.packet_count = 0\r\n self.rejected_count = 0\r\n self.queue_lock = threading.Lock()\r\n self.usb_lock = threading.Lock()\r\n\r\n self.usb_status = None\r\n self.set_usb_status(\"Uninitialized\")\r\n\r\n def __enter__(self):\r\n self.open()\r\n return self\r\n\r\n def __exit__(self, exc_type, exc_val, exc_tb):\r\n self.close()\r\n\r\n def __iadd__(self, other):\r\n self.queue_lock.acquire()\r\n self.add_queue += other\r\n self.queue_lock.release()\r\n self.listener(\"buffer\", len(self.buffer) + len(self.add_queue))\r\n if self.autostart:\r\n self.start_queue_consumer()\r\n return self\r\n\r\n def log(self, info):\r\n update = str(info) + '\\n'\r\n self.listener(\"usb_log\", update)\r\n self.device_log += update\r\n\r\n def start_usb(self):\r\n self.set_usb_status(\"Connecting\")\r\n usb_thread = UsbConnectThread(self)\r\n usb_thread.start()\r\n\r\n def stop_usb(self):\r\n self.set_usb_status(\"Disconnecting\")\r\n usb_thread = UsbDisconnectThread(self)\r\n usb_thread.start()\r\n\r\n def emergency_stop(self):\r\n self.thread.state = THREAD_STATE_ABORT\r\n packet = b'I' + b'F' * 29\r\n if self.usb is not None:\r\n try:\r\n self.send_packet(packet)\r\n except usb.core.USBError:\r\n pass # Emergency stop was a failure.\r\n self.buffer = b''\r\n self.add_queue = b''\r\n self.listener(\"buffer\", len(self.buffer))\r\n self.listener(\"control_thread\", self.thread.state)\r\n\r\n def reset_thread(self):\r\n self.thread = ControllerQueueThread(self)\r\n\r\n def start_queue_consumer(self):\r\n if self.thread.state == THREAD_STATE_ABORT:\r\n # We cannot reset an aborted thread without specifically calling reset.\r\n return\r\n if self.thread.state == THREAD_STATE_FINISHED:\r\n self.thread = ControllerQueueThread(self)\r\n if self.thread.state == THREAD_STATE_UNSTARTED:\r\n self.thread.state = THREAD_STATE_STARTED\r\n self.thread.start()\r\n self.listener(\"control_thread\", self.thread.state)\r\n\r\n def pause(self):\r\n self.process_queue_pause = True\r\n self.thread.state = THREAD_STATE_PAUSED\r\n self.listener(\"control_thread\", self.thread.state)\r\n\r\n def resume(self):\r\n self.process_queue_pause = False\r\n self.thread.state = THREAD_STATE_STARTED\r\n self.listener(\"control_thread\", self.thread.state)\r\n\r\n 
def process_queue(self):\r\n if self.process_queue_pause:\r\n return False\r\n if self.usb is None and not self.mock:\r\n try:\r\n self.open()\r\n except usb.core.USBError:\r\n return False\r\n wait_finish = False\r\n if len(self.add_queue):\r\n self.queue_lock.acquire()\r\n self.buffer += self.add_queue\r\n self.add_queue = b''\r\n self.queue_lock.release()\r\n self.listener(\"buffer\", len(self.buffer))\r\n if len(self.buffer) == 0:\r\n return True\r\n find = self.buffer.find(b'\\n', 0, 30)\r\n if find != -1:\r\n length = min(30, len(self.buffer), find + 1)\r\n else:\r\n length = min(30, len(self.buffer))\r\n packet = self.buffer[:length]\r\n if packet.endswith(b'-'): # edge condition of \"-\\n\" catching only the '-' exactly at 30.\r\n packet += self.buffer[length:length + 1]\r\n length += 1\r\n if packet.endswith(b'\\n'):\r\n packet = packet[:-1]\r\n if packet.endswith(b'-'):\r\n packet = packet[:-1]\r\n wait_finish = True\r\n packet += b'F' * (30 - len(packet))\r\n # try to send packet\r\n try:\r\n self.wait(STATUS_OK)\r\n if self.process_queue_pause:\r\n return False # Paused during wait.\r\n if len(packet) == 30:\r\n self.buffer = self.buffer[length:]\r\n self.listener(\"buffer\", len(self.buffer))\r\n else:\r\n return True # No valid packet was able to be produced.\r\n self.send_packet(packet)\r\n except usb.core.USBError:\r\n # Execution should have broken at wait. Therefore not corrupting packet. Failed a reconnect demand.\r\n return False\r\n if wait_finish:\r\n self.wait(STATUS_FINISH)\r\n return False\r\n\r\n def set_usb_status(self, status):\r\n if status == self.usb_status:\r\n return\r\n self.usb_status = status\r\n self.listener(\"usb_status\", self.usb_status)\r\n\r\n def open(self):\r\n self.usb_lock.acquire()\r\n self.set_usb_status(\"Connecting\")\r\n self.log(\"Attempting connection to USB.\")\r\n try:\r\n devices = usb.core.find(idVendor=0x1A86, idProduct=0x5512, find_all=True)\r\n except usb.core.NoBackendError:\r\n self.log(\"PyUsb detected no backend LibUSB driver.\")\r\n self.set_usb_status(\"No Driver\")\r\n time.sleep(1)\r\n return\r\n d = []\r\n self.usb = None\r\n for device in devices:\r\n self.log(\"K40 device detected:\\n%s\\n\" % str(device))\r\n d.append(device)\r\n if self.usb_index == -1:\r\n if self.usb_address == -1 and self.usb_bus == -1:\r\n if len(d) > 0:\r\n self.usb = d[0]\r\n else:\r\n for dev in d:\r\n if (self.usb_address == -1 or self.usb_address == dev.address) and \\\r\n (self.usb_bus == -1 or self.usb_bus == dev.bus):\r\n self.usb = dev\r\n break\r\n else:\r\n if len(d) > self.usb_index:\r\n self.usb = d[self.usb_index]\r\n for i, dev in enumerate(d):\r\n self.log(\"Device %d Bus: %d Address %d\" % (i, dev.bus, dev.address))\r\n if self.usb is None:\r\n self.set_usb_status(\"Not Found\")\r\n if len(d) == 0:\r\n self.log(\"K40 not found.\")\r\n else:\r\n self.log(\"K40 devices were found but the configuration requires #%d Bus: %d, Add: %d\"\r\n % (self.usb_index, self.usb_bus, self.usb_address))\r\n time.sleep(1)\r\n self.usb_lock.release()\r\n raise usb.core.USBError('Unable to find device.')\r\n self.usb.set_configuration()\r\n self.log(\"Device found. 
Using device: #%d on bus: %d at address %d\"\r\n                 % (self.usb_index, self.usb.bus, self.usb.address))\r\n        self.interface = self.usb.get_active_configuration()[(0, 0)]\r\n        try:\r\n            if self.usb.is_kernel_driver_active(self.interface.bInterfaceNumber):\r\n                try:\r\n                    self.log(\"Attempting to detach kernel\")\r\n                    self.usb.detach_kernel_driver(self.interface.bInterfaceNumber)\r\n                    self.log(\"Kernel detach: Success\")\r\n                    self.detached = True\r\n                except usb.core.USBError:\r\n                    self.log(\"Kernel detach: Failed\")\r\n                    self.usb_lock.release()\r\n                    raise usb.core.USBError('Unable to detach from kernel')\r\n        except NotImplementedError:\r\n            self.log(\"Kernel detach: Not Implemented.\")  # Driver does not permit kernel detaching.\r\n        self.log(\"Attempting to claim interface.\")\r\n        usb.util.claim_interface(self.usb, self.interface)\r\n        # TODO: A second attempt to claim the same interface will lag out at this point.\r\n        self.log(\"Interface claimed.\")\r\n        self.log(\"Requesting Status.\")\r\n        self.update_status()\r\n        self.log(str(self.status))\r\n        self.log(\"Sending control transfer.\")\r\n        self.usb.ctrl_transfer(bmRequestType=64, bRequest=177, wValue=258,\r\n                               wIndex=0, data_or_wLength=0, timeout=5000)\r\n        self.log(\"Requesting Status.\")\r\n        self.update_status()\r\n        self.log(str(self.status))\r\n        self.log(\"USB Connection Successful.\")\r\n        self.set_usb_status(\"Connected\")\r\n        self.usb_lock.release()\r\n\r\n    def close(self):\r\n        self.usb_lock.acquire()\r\n        self.set_usb_status(\"Disconnecting\")\r\n        self.log(\"Attempting disconnection from USB.\")\r\n        if self.usb is not None:\r\n            if self.detached:\r\n                self.log(\"Kernel was detached.\")\r\n                try:\r\n                    self.log(\"Attempting kernel attach\")\r\n                    self.usb.attach_kernel_driver(self.interface.bInterfaceNumber)\r\n                    self.detached = False\r\n                    self.log(\"Kernel successfully attached\")\r\n                except usb.core.USBError:\r\n                    self.log(\"Error while attempting kernel attach\")\r\n                    self.usb_lock.release()\r\n                    raise usb.core.USBError('Unable to reattach driver to kernel')\r\n            else:\r\n                self.log(\"Kernel was not detached.\")\r\n            self.log(\"Attempting to release interface.\")\r\n            try:\r\n                usb.util.release_interface(self.usb, self.interface)\r\n                self.log(\"Interface released\")\r\n            except usb.core.USBError:\r\n                self.log(\"Interface did not exist.\")\r\n            self.log(\"Attempting to dispose resources.\")\r\n            usb.util.dispose_resources(self.usb)\r\n            self.log(\"Resources disposed.\")\r\n            self.log(\"Attempting USB reset.\")\r\n            try:\r\n                self.usb.reset()\r\n                self.log(\"USB reset.\")\r\n            except usb.core.USBError:\r\n                self.log(\"USB connection did not exist.\")\r\n            self.interface = None\r\n            self.usb = None\r\n            self.log(\"USB Disconnection Successful.\")\r\n        else:\r\n            self.log(\"No connection was found.\")\r\n        self.set_usb_status(\"Disconnected\")\r\n        self.usb_lock.release()\r\n\r\n    def send_packet(self, packet_byte_data):\r\n        if len(packet_byte_data) != 30:\r\n            raise usb.core.USBError('We can only send 30 byte packets.')\r\n        data = convert_to_list_bytes(packet_byte_data)\r\n        packet = [166] + [0] + data + [166] + [onewire_crc_lookup(data)]\r\n\r\n        sending = True\r\n        while sending:\r\n            if self.mock:\r\n                time.sleep(0.02)\r\n            else:\r\n                # TODO: Under some cases it attempts to claim interface here and cannot. 
Sends USBError (None)\r\n self.usb.write(0x2, packet, 10000) # usb.util.ENDPOINT_OUT | usb.util.ENDPOINT_TYPE_BULK\r\n self.packet_count += 1\r\n self.listener(\"packet\", packet)\r\n self.listener(\"packet_text\", packet_byte_data)\r\n self.update_status()\r\n if self.status[1] != STATUS_PACKET_REJECTED:\r\n sending = False\r\n\r\n def update_status(self):\r\n if self.mock:\r\n self.status = [STATUS_OK] * 6\r\n time.sleep(0.01)\r\n else:\r\n try:\r\n self.usb.write(0x02, [160], 10000) # usb.util.ENDPOINT_IN | usb.util.ENDPOINT_TYPE_BULK\r\n except usb.core.USBError as e:\r\n self.log(\"Usb refused status check.\")\r\n while True:\r\n try:\r\n self.close()\r\n self.open()\r\n except usb.core.USBError:\r\n pass\r\n if self.usb is not None:\r\n break\r\n # TODO: will sometimes crash here after failing to actually reclaim USB connection.\r\n self.usb.write(0x02, [160], 10000)\r\n self.log(\"Sending original status check.\")\r\n self.status = self.usb.read(0x82, 6, 10000)\r\n self.listener(\"status\", self.status)\r\n\r\n def wait(self, value):\r\n i = 0\r\n while True:\r\n self.update_status()\r\n if self.mock: # Mock controller\r\n self.status = [value] * 6\r\n status = self.status[1]\r\n if status == STATUS_PACKET_REJECTED:\r\n self.rejected_count += 1\r\n if status == value:\r\n break\r\n time.sleep(0.1)\r\n self.listener(\"wait\", (value, i))\r\n i += 1\r\n", "sub_path": "K40Controller.py", "file_name": "K40Controller.py", "file_ext": "py", "file_size_in_byte": 16550, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "threading.Thread", "line_number": 72, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 74, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 74, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 86, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 103, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 105, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 105, "usage_type": "attribute"}, {"api_name": "usb.core.core", "line_number": 111, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 111, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 115, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 117, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 117, "usage_type": "attribute"}, {"api_name": "usb.core.core", "line_number": 123, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 123, "usage_type": "name"}, {"api_name": "threading.Lock", "line_number": 154, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 155, "usage_type": "call"}, {"api_name": "usb.core.core", "line_number": 197, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 197, "usage_type": "name"}, {"api_name": "usb.core.core", "line_number": 234, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 234, "usage_type": "name"}, {"api_name": "usb.core.core", "line_number": 271, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 271, "usage_type": "name"}, {"api_name": "usb.core.core.find", "line_number": 289, "usage_type": "call"}, {"api_name": "usb.core.core", "line_number": 289, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 
289, "usage_type": "name"}, {"api_name": "usb.core.core", "line_number": 290, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 290, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 293, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 322, "usage_type": "call"}, {"api_name": "usb.core.core.USBError", "line_number": 324, "usage_type": "call"}, {"api_name": "usb.core.core", "line_number": 324, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 324, "usage_type": "name"}, {"api_name": "usb.core.core", "line_number": 336, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 336, "usage_type": "name"}, {"api_name": "usb.core.core.USBError", "line_number": 339, "usage_type": "call"}, {"api_name": "usb.core.core", "line_number": 339, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 339, "usage_type": "name"}, {"api_name": "usb.core.util.claim_interface", "line_number": 343, "usage_type": "call"}, {"api_name": "usb.core.util", "line_number": 343, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 343, "usage_type": "name"}, {"api_name": "usb.core.core", "line_number": 371, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 371, "usage_type": "name"}, {"api_name": "usb.core.core.USBError", "line_number": 374, "usage_type": "call"}, {"api_name": "usb.core.core", "line_number": 374, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 374, "usage_type": "name"}, {"api_name": "usb.core.util.release_interface", "line_number": 379, "usage_type": "call"}, {"api_name": "usb.core.util", "line_number": 379, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 379, "usage_type": "name"}, {"api_name": "usb.core.core", "line_number": 381, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 381, "usage_type": "name"}, {"api_name": "usb.core.util.dispose_resources", "line_number": 384, "usage_type": "call"}, {"api_name": "usb.core.util", "line_number": 384, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 384, "usage_type": "name"}, {"api_name": "usb.core.core", "line_number": 390, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 390, "usage_type": "name"}, {"api_name": "usb.core.core.USBError", "line_number": 402, "usage_type": "call"}, {"api_name": "usb.core.core", "line_number": 402, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 402, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 409, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 423, "usage_type": "call"}, {"api_name": "usb.core.core", "line_number": 427, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 427, "usage_type": "name"}, {"api_name": "usb.core.core", "line_number": 433, "usage_type": "attribute"}, {"api_name": "usb.core", "line_number": 433, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 454, "usage_type": "call"}]} +{"seq_id": "109132537", "text": "from constract_test.irt import IRT\nimport os\nimport pickle\nimport random\nimport argparse\nfrom collections import deque\nimport numpy as np\nimport torch\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import IterableDataset, DataLoader, get_worker_info\nfrom torch.utils.tensorboard import SummaryWriter\nfrom sklearn.metrics import roc_curve, auc\n\n\nclass TripletUniformPair(IterableDataset): # 产生数据集 需要传入组信息\n def __init__(self, num_exer, user_list, 
pair, shuffle, num_epochs):\n self.num_exer = num_exer\n self.user_list = user_list\n self.pair = pair\n self.shuffle = shuffle\n self.num_epochs = num_epochs\n\n def __iter__(self):\n worker_info = get_worker_info()\n # Shuffle per epoch\n self.example_size = self.num_epochs * len(self.pair)\n self.example_index_queue = deque([])\n self.seed = 0\n if worker_info is not None:\n self.start_list_index = worker_info.id\n self.num_workers = worker_info.num_workers\n self.index = worker_info.id\n else:\n self.start_list_index = None\n self.num_workers = 1\n self.index = 0\n return self\n\n def __next__(self):\n if self.index >= self.example_size:\n raise StopIteration\n while len(self.example_index_queue) == 0:\n index_list = list(range(len(self.pair))) #所有pair的长度\n if self.shuffle:\n random.Random(self.seed).shuffle(index_list)\n self.seed += 1\n if self.start_list_index is not None:\n index_list = index_list[self.start_list_index::self.num_workers]\n # Calculate next start index\n self.start_list_index = (self.start_list_index + (self.num_workers - (len(self.pair) % self.num_workers))) % self.num_workers\n self.example_index_queue.extend(index_list)\n result = self._example(self.example_index_queue.popleft())\n self.index += self.num_workers\n return result\n\n def _example(self, idx):\n u = self.pair[idx][0]\n for k, v in self.pair[idx][1].items():\n i, s = int(k), int(v)\n # i = self.pair[idx][1]\n # s = self.pair[idx][2]\n return u, i, s\n\n\n\ndef main(args):\n print('============================')\n print('Loading data')\n\n # Load preprocess data\n with open(args.data, 'rb') as f:\n dataset = pickle.load(f)\n user_size, exer_size, know_size, higher_know_size = dataset['user_size'], dataset['exer_size'],dataset['know_size'],dataset['higher_know_size']\n train_user_list, test_user_list = dataset['train_user_list'], dataset['test_user_list']\n train_dict_pair, know_group, higher_know_group = \\\n dataset['train_dict_pair'], dataset['know_group'], dataset['higher_know_group']\n print('Load complete')\n print('============================')\n\n # Create dataset, model, optimizer\n dataset = TripletUniformPair(exer_size, train_user_list, train_dict_pair, True, args.n_epochs)\n loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=16, pin_memory=True)\n model = IRT(exer_size, user_size, args.batch_size).cuda()\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n writer = SummaryWriter()\n\n # Training\n smooth_loss = 0\n idx = 0\n print('============================')\n print('Start train')\n for u, i, s in loader:\n optimizer.zero_grad()\n loss = model(u, i, s)\n loss.backward()\n optimizer.step()\n writer.add_scalar('./train/loss', loss, idx)\n smooth_loss = smooth_loss * 0.99 + loss * 0.01\n if idx % args.print_every == (args.print_every - 1):\n print('loss: %.4f' % smooth_loss)\n idx += 1\n dirname = os.path.dirname(os.path.abspath(args.model))\n os.makedirs(dirname, exist_ok=True)\n torch.save(model.state_dict(), args.model)\n print('Save model complete')\n print('Train complete')\n print('============================')\n\n\nif __name__ == '__main__':\n # Parse argument\n parser = argparse.ArgumentParser()\n parser.add_argument('--data',\n type=str,\n default=\"../data_edu/Assist/data.pkl\",\n help=\"File path for data\")\n\n parser.add_argument('--n_epochs',\n type=int,\n default=5,\n help=\"Number of epoch during training\")\n\n parser.add_argument('--batch_size',\n type=int,\n default=200,\n help=\"Batch size in one iteration\")\n\n parser.add_argument('--dim',\n 
type=int,\n default=4,\n help=\"Dimension for embedding\")\n\n parser.add_argument('--weight_decay',\n type=float,\n default=0.025,\n help=\"Weight decay factor\")\n\n parser.add_argument('--weight_decay_1',\n type=float,\n default=0.025,\n help=\"Weight decay_1 factor\")\n\n parser.add_argument('--lamdaU',\n type=float,\n default=0.01,\n help=\"lamdaU factor\")\n\n parser.add_argument('--lamdaV_1',\n type=float,\n default=0.01,\n help=\"lamdaV_1 factor\")\n\n parser.add_argument('--lr',\n type=float,\n default=1e-3,\n help=\"Learning rate\")\n\n parser.add_argument('--print_every',\n type=int,\n default=100,\n help=\"Period for printing smoothing loss during training\")\n\n parser.add_argument('--model',\n type=str,\n default=\"../data_edu/Assist/irt/output_model.pt\",\n help=\"File path for model\")\n args = parser.parse_args()\n main(args)", "sub_path": "constract_test/test_irt.py", "file_name": "test_irt.py", "file_ext": "py", "file_size_in_byte": 6106, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "torch.utils.data.IterableDataset", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.utils.data.get_worker_info", "line_number": 25, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 28, "usage_type": "call"}, {"api_name": "random.Random", "line_number": 46, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 83, "usage_type": "call"}, {"api_name": "constract_test.irt.IRT", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 103, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 105, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "81566232", "text": "# encoding=utf-8\nimport unittest\nfrom utils.methods import set_dp_interface as set_dp_interface\nimport pymysql\nimport os\nimport configparser as cparser\nimport openpyxl\n\n\n############################### 配置文件的地址######################\nbase_dr = str(os.path.dirname(os.path.dirname(__file__)))\nbae_idr = base_dr.replace('\\\\', '/')\n# 配置文件的地址\nfile_path = bae_idr + \"/config.ini\"\nconf = cparser.ConfigParser()\nconf.read(file_path)\n\n#######################读取配置文件上###################################\nmysqlHost = conf.get(\"mysqlconf\", \"host\") # 读取配置文件上数据库主机号\nmysqlPort = conf.get(\"mysqlconf\", \"port\") # 读取配置文件数据库的端口号\nmysqlUser = conf.get(\"mysqlconf\", \"user\") # 读取配置文件数据库的账号\nmysqlPassword = conf.get(\"mysqlconf\", \"password\") # 读取配置文件数据库的密码\nmysqlName = conf.get(\"mysqlconf\", \"db_name\") # 读取配置文件数据库的名字\nwbook = openpyxl.load_workbook(conf.get(\"dmpexcel\", \"excel_path\")) # 读取配置文件的Excel文档地址\ntable2 = wbook[conf.get(\"dmpexcel\", \"table\")] # 读取配置文件的Excel文档所使用的用例table\n\n#######################配置Excel的用例地址#################################\n#\n# wbook=openpyxl.load_workbook(bae_idr + \"/testexcel/dmptest3.xlsx\")\n# table2=wbook['Test_Case2']\n# 
# print(table2.title)\n\n#######################根据Excel读取本接口对应的key#################################\nbegtest=[]\nendtest=[]\n\nfor i in range(1,int(table2.max_column)):\n if table2[\"A\"+str(i)].value==\"go\":\n begtest.append(i)\n\n if table2[\"L\"+str(i)].value==\"end\":\n endtest.append(i)\n\nfor j in range(1, len(begtest)):\n for k in range(2, int(begtest[j] + 1)):\n # print(\"excel:\" + table2[\"C\" + str(k)].value)\n if os.path.basename(__file__)[0:-3] == table2[\"C\" + str(k)].value:\n\n # 接口参数\n planentry_url = table2[\"D\" + str(k)].value\n totalsummary_address = table2[\"E\" + str(k)].value\n totalsummary_data = table2[\"F\" + str(k)].value\n\n # 数据库条件\n create_time = table2[\"I\" + str(k)].value\n startDate = table2[\"J\" + str(k)].value\n endDate = table2[\"K\" + str(k)].value\n break\n########################################################\n\n\nclass test_analysis_summary (unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.pymysqlconn = pymysql.connect(host=mysqlHost, port=int(mysqlPort), user=mysqlUser, passwd=mysqlPassword, db=mysqlName,\n charset=\"utf8\",cursorclass = pymysql.cursors.DictCursor)\n cls.pymysqlcursor = cls.pymysqlconn.cursor()\n\n # 接口查询\n cls.res_date = set_dp_interface(url=planentry_url + totalsummary_address,data = totalsummary_data)\n\n print(\"dmp_analysis_summary接口测试开始\")\n\n @classmethod\n def tearDownClass(cls):\n print(\"测试结束\")\n cls.pymysqlconn.close()\n\n # top 10 广告主数\n def test_playTimeLen(self):\n print(\"开始测试test_playTimeLen\")\n # 执行接口查询\n # self.res_date = set_dp_interface(url=interface.get(\"queryentry_url\") + queryentrySspSummary,data = sspSummary_data)\n print(\"接口返回结果:\" + str(self.res_date))\n inf_playTimeLen = self.res_date['data']\n # 执行SQL查询\n sql = \"SELECT COUNT(DISTINCT dsp_id) AS play_time_len FROM dsp_play_summary WHERE transaction_date BETWEEN '{0}' and '{1}';\".format(\n startDate, endDate)\n print(\"数据库查询使用语句:\" + sql)\n self.pymysqlcursor.execute(sql)\n play_time_len = self.pymysqlcursor.fetchall()\n sql_play_time_len = play_time_len[0].get(\"play_time_len\")\n # 判断 接口数据与数据库查询结果\n print('接口返回结果:{0} , 数据库查询结果:{1}'.format(len(inf_playTimeLen), len(sql_play_time_len)))\n self.assertEquals(str(len(inf_playTimeLen)), str(len(sql_play_time_len)), \"test_playTimeLen数据对不上\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "sub_path": "dptest_pro/sspTest/dmpcase/test_dmp_analysisDspList.py", "file_name": "test_dmp_analysisDspList.py", "file_ext": "py", "file_size_in_byte": 4133, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 15, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 65, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 66, "usage_type": "attribute"}, {"api_name": "utils.methods.set_dp_interface", "line_number": 70, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "601832371", "text": "from django.test import TestCase\nfrom 
django.test.client import Client\nfrom django.core import mail\n\nimport re\nimport urllib2\n\n\nclass DjangoIsaacTestCase(TestCase):\n def setUp(self):\n self.client = Client()\n self.pages = ['/', '/bluescreen.html']\n\n\n def test_pages(self):\n for page in self.pages:\n response = self.client.get(page)\n self.assertEqual(response.status_code, 200)\n\n\n def test_foreign_links(self):\n for page in self.pages:\n response = self.client.get(page)\n links = re.findall('href=(?:\\'|\\\")((?:http|https)\\S*)(?:\\'|\\\")',\n response.content)\n for link in links:\n response = urllib2.urlopen(link)\n self.assertEqual(response.code, 200)\n\n\n def test_send_message(self):\n self.assertEqual(len(mail.outbox), 0)\n\n response = self.client.post('/', {'message': 'Message here!', 'human': ''})\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, 'Pony From isaacbythewood.com')\n\n response = self.client.post('/', {'message': 'Message here!', 'human': 'robot'})\n self.assertEqual(len(mail.outbox), 1)\n\n response = self.client.post('/', {'message': '', 'human': ''})\n self.assertEqual(len(mail.outbox), 1)\n", "sub_path": "django_isaac/core/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 1344, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.test.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "django.test.client.Client", "line_number": 11, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 24, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 27, "usage_type": "call"}, {"api_name": "django.core.mail.outbox", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.core.mail", "line_number": 32, "usage_type": "name"}, {"api_name": "django.core.mail.outbox", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.core.mail", "line_number": 35, "usage_type": "name"}, {"api_name": "django.core.mail.outbox", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.core.mail", "line_number": 36, "usage_type": "name"}, {"api_name": "django.core.mail.outbox", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.core.mail", "line_number": 39, "usage_type": "name"}, {"api_name": "django.core.mail.outbox", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.core.mail", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "574795810", "text": "from django.conf.urls import url, include\nfrom rest_framework_nested import routers\nfrom api.views.auth_views import ObtainJWTTokenForShopOwnerView\nfrom api.views.manage_views import (\n MeView, ShopsViewSet, ShopServicesViewSet, ShopWeekWorkingHoursViewSet,\n ShopSpecialWorkingHoursViewSet, ShopClosingDaysViewSet,\n ShopSimpleServicesViewSet, ShopResourceTypesViewSet, ShopResourcesViewSet,\n ShopBookingsViewSet, BookingActionViewSet,\n)\n\nrouter = routers.SimpleRouter()\nrouter.register('shops', ShopsViewSet)\nrouter.register('bookings', BookingActionViewSet, base_name='booking-actions')\n\nshops_router = routers.NestedSimpleRouter(router, r'shops', lookup='shop')\nshops_router.register(r'services', ShopServicesViewSet)\nshops_router.register(r'simpleservices', ShopSimpleServicesViewSet, base_name='simple-service')\nshops_router.register(r'weekworkinghours', ShopWeekWorkingHoursViewSet)\nshops_router.register(r'specialworkinghours', ShopSpecialWorkingHoursViewSet)\nshops_router.register(r'closingdays', 
ShopClosingDaysViewSet)\nshops_router.register(r'resourcetypes', ShopResourceTypesViewSet)\nshops_router.register(r'resources', ShopResourcesViewSet)\nshops_router.register(r'bookings', ShopBookingsViewSet)\n\n\nurlpatterns = [\n url(r'^auth/$', ObtainJWTTokenForShopOwnerView.as_view(), name='shopowner-auth'),\n url(r'^me/$', MeView.as_view(), name='shopowner-me'),\n]\nurlpatterns += router.urls\nurlpatterns += shops_router.urls\n", "sub_path": "backend_prenotazioni/api/urls/manage_urls.py", "file_name": "manage_urls.py", "file_ext": "py", "file_size_in_byte": 1444, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "rest_framework_nested.routers.SimpleRouter", "line_number": 11, "usage_type": "call"}, {"api_name": "rest_framework_nested.routers", "line_number": 11, "usage_type": "name"}, {"api_name": "api.views.manage_views.ShopsViewSet", "line_number": 12, "usage_type": "argument"}, {"api_name": "api.views.manage_views.BookingActionViewSet", "line_number": 13, "usage_type": "argument"}, {"api_name": "rest_framework_nested.routers.NestedSimpleRouter", "line_number": 15, "usage_type": "call"}, {"api_name": "rest_framework_nested.routers", "line_number": 15, "usage_type": "name"}, {"api_name": "api.views.manage_views.ShopServicesViewSet", "line_number": 16, "usage_type": "argument"}, {"api_name": "api.views.manage_views.ShopSimpleServicesViewSet", "line_number": 17, "usage_type": "argument"}, {"api_name": "api.views.manage_views.ShopWeekWorkingHoursViewSet", "line_number": 18, "usage_type": "argument"}, {"api_name": "api.views.manage_views.ShopSpecialWorkingHoursViewSet", "line_number": 19, "usage_type": "argument"}, {"api_name": "api.views.manage_views.ShopClosingDaysViewSet", "line_number": 20, "usage_type": "argument"}, {"api_name": "api.views.manage_views.ShopResourceTypesViewSet", "line_number": 21, "usage_type": "argument"}, {"api_name": "api.views.manage_views.ShopResourcesViewSet", "line_number": 22, "usage_type": "argument"}, {"api_name": "api.views.manage_views.ShopBookingsViewSet", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "api.views.auth_views.ObtainJWTTokenForShopOwnerView.as_view", "line_number": 27, "usage_type": "call"}, {"api_name": "api.views.auth_views.ObtainJWTTokenForShopOwnerView", "line_number": 27, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "api.views.manage_views.MeView.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "api.views.manage_views.MeView", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "618339419", "text": "import sys\n\nsys.path.extend([\"./\"])\n\nfrom data.cifar_loader import load_data\nfrom src.experiments.run_attack import *\nfrom src.classifier.secml_classifier import SVMClassifier, LogisticClassifier\nfrom src.optimizer.beta_optimizer import beta_poison, to_scaled_img\nfrom src.optimizer.flip_poisoning import flip_batch_poison\nfrom src.optimizer.white_poisoning import white_poison\nimport os\n\nif __name__ == \"__main__\":\n set_seed(444)\n d1, d2 = int(opts.ds[0]), int(opts.ds[2])\n classes = (d1, d2)\n tr, val, ts = load_data(labels=classes, n_tr=300, n_val=500, n_ts=1000)\n\n clf = LogisticClassifier() if opts.classifier == \"logistic\" else SVMClassifier(k=\"linear\")\n\n params = {\n \"n_proto\": opts.n_proto,\n \"lb\": 1,\n \"y_target\": None,\n \"y_poison\": None,\n \"transform\": 
to_scaled_img,\n }\n path = opts.path + \"/cifar-{}-tr{}/{}/\".format(\n opts.ds, tr.X.shape[0], opts.classifier\n )\n os.makedirs(path, exist_ok=True)\n\n if \"beta\" in opts.generator:\n name = path + \"beta_poison_k\" + str(opts.n_proto)\n run_attack(beta_poison, name, clf, tr, val, ts, params=params)\n if \"white\" in opts.generator:\n name = path + \"white_poison\"\n run_attack(white_poison, name, clf, tr, val, ts, params=params)\n if \"flip\" in opts.generator:\n name = path + \"flip\"\n run_attack(flip_batch_poison, name, clf, tr, val, ts, params=params)\n", "sub_path": "src/experiments/cifar10/run_pairs.py", "file_name": "run_pairs.py", "file_ext": "py", "file_size_in_byte": 1423, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.path.extend", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "data.cifar_loader.load_data", "line_number": 17, "usage_type": "call"}, {"api_name": "src.classifier.secml_classifier.LogisticClassifier", "line_number": 19, "usage_type": "call"}, {"api_name": "src.classifier.secml_classifier.SVMClassifier", "line_number": 19, "usage_type": "call"}, {"api_name": "src.optimizer.beta_optimizer.to_scaled_img", "line_number": 26, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 31, "usage_type": "call"}, {"api_name": "src.optimizer.beta_optimizer.beta_poison", "line_number": 35, "usage_type": "argument"}, {"api_name": "src.optimizer.white_poisoning.white_poison", "line_number": 38, "usage_type": "argument"}, {"api_name": "src.optimizer.flip_poisoning.flip_batch_poison", "line_number": 41, "usage_type": "argument"}]} +{"seq_id": "605139015", "text": "from numpy import *\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import pyplot as plt \nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\nN = int(10) # number of random steps\nP = int(2*N+1) # number of positions\n\ndef U(P):\n\n\theads = array([1, 0]) # |0>\n\ttails = array([0, 1]) # |1>\n\tC00 = outer(heads, heads) # |0><0| \n\tC01 = outer(heads, tails) # |0><1| \n\tC10 = outer(tails, heads) # |1><0| \n\tC11 = outer(tails, tails) # |1><1|\n\tH = (C00 + C01 + C10 - C11)/sqrt(2.)\n\tC_hat = kron(H,H)\n\n\n\tplus = roll(eye(P), 1, axis=0)\n\tminus = roll(eye(P), -1, axis=0)\n\n\tHH = kron(C00,C00)\n\tHT = kron(C00,C11)\n\tTH = kron(C11,C00)\n\tTT = kron(C11,C11)\n\n\tRU = kron(plus, plus)\n\tRD = kron(plus, minus)\n\tLU = kron(minus, plus)\n\tLD = kron(minus, minus)\n\t \n\tS_hat = kron(HH,RU) + kron(HT,RD) + kron(TH,LU) + kron(TT,LD)\n\n\tU = dot(S_hat, kron(C_hat, eye(P**2)))\n\n\treturn U\n\ndef psiN(P,U):\n\n\tposn0 = zeros(P**2)\n\tposn0[int(((P**2)-1)/2)] = 1 \n\n\n\n\theads = array([1, 0]) # |0>\n\ttails = array([0, 1]) # |1>\n\tcoins0 = (1/2) * (kron(heads,heads) + 1j*kron(heads,tails) + 1j*kron(tails,heads) - kron(tails,tails))\n\n\n\tpsi0 = kron(coins0, posn0)\n\n\n\tpsiN = dot(linalg.matrix_power(U, N),psi0)\n\n\treturn psiN\n\ndef prob(P,psiN):\n\n\n\tprob = zeros(P**2)\n\tfor k in range(P**2): #for each position along the line...\n\t posnN = zeros(P**2)\n\t posnN[k] = 1 #make the |k>p state as a vector with only the k-th component 1 and all others 0\n\t M_hat_k = kron(eye(2**2), outer(posnN,posnN)) #create the Mk measurement operator \n\t prob[k] = inner(psiN.conjugate(), dot(M_hat_k,psiN)).real #take inner product of psiN and Mk psiN (equivalent to modulus square)\n\n\tprob = 
prob.reshape((P,P))\n\n\treturn prob\n\n\ndef plot(P,prob):\n\tfig = plt.figure()\n\tax = fig.gca(projection='3d')\n\n\t# Make data.\n\tX = arange(P)\n\tY = arange(P)\n\tX, Y = meshgrid(X, Y)\n\tZ = prob\n\n\t# Plot the surface.\n\tsurf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,\n\t                       linewidth=0, antialiased=False)\n\n\t# Customize the z axis.\n\tax.set_zlim(0, 0.11)\n\tax.zaxis.set_major_locator(LinearLocator(10))\n\tax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n\n\t# Add a color bar which maps values to colors.\n\tfig.colorbar(surf, shrink=0.5, aspect=5)\n\n\tplt.show()\n\nU = U(P)\npsiN = psiN(P,U)\nprob = prob(P,psiN)\nplot(P,prob)", "sub_path": "QC_project/qwr_grid.py", "file_name": "qwr_grid.py", "file_ext": "py", "file_size_in_byte": 2350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.cm.coolwarm", "line_number": 86, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.ticker.LinearLocator", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}]} +{"seq_id": "325763372", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n__title__ = ''\n__author__ = 'jia666666'\n__time__ = '2020/3/26'\n\"\"\"\nimport requests\nimport re\nfrom pyquery import PyQuery as pq\nimport time\nfrom fake_useragent import UserAgent\n\n\n# fetch the request headers\ndef strheader():\n    print(\"Step 1: fetching the https request headers\")\n    headerDict,f = {},open('head.txt', 'r')# initialise the dict and open the file read-only\n    headersText = f.read().replace(' ', '') # read the file and strip the spaces with replace\n    headers = re.split('\\n', headersText)  # split on newlines, returning a list\n    for header in headers:# iterate over the list\n        j ,result =0, re.split(':', header, maxsplit=1)# maxsplit is the maximum number of splits\n        for i in result:\n            j = j + 1\n            if j % 2 == 1:\n                key = i\n            if j % 2 == 0:\n                headerDict[key] = i\n    print(\"Step 2: request headers loaded\")\n    f.close()\n    return headerDict\n\n\n# build the review links for the returned product shops\ndef get_productlist(url):\n    try:\n        print(\"Step 4: trying to fetch the product page\")\n        head['user-agent'] = UserAgent().chrome\n        response=requests.get(url,headers=head)# request the page\n        if response.status_code==200:# check the response status code\n            doc=pq(response.text)# parse the page\n            #print(doc)\n            k=doc('.product .productImg-wrap ').items()# filter the matching elements from the html, returned as a list\n            print(\"Step 5: extracting the product IDs and seller IDs\")\n            with open('手机.txt', 'a+', encoding='utf-8') as f:\n                for i in k:\n                    k=i('a').attr('href')# get the link from the a tag, i.e. the shop link\n                    nk = re.findall('id=(.*?)&.*?&user_id=(.*?)&',k)# get the product ID and seller ID for the later URL\n                    URL='https://rate.tmall.com/list_detail_rate.htm?itemId='+nk[0][0]+'&sellerId='+nk[0][1]+'&currentPage=1' # build the review link for the shop's product\n                    page=get_assesslink(URL)\n                    print(page)\n                    # for i in range(page+1):\n                    #     URL=URL[:-1]+str(i)\n                    #     f.write(URL+'\\n')\n        else:\n            print(response.status_code)\n            print(\"request \" + url + \" failed\")\n    except Exception as e:\n        print(e)\n\n# get the maximum page count of the shop's review link\ndef get_assesslink(url):\n    try:\n        print(url)\n        response = requests.get(url, headers=head)\n        if response.status_code==200:\n            # get the maximum number of review pages\n            page = (re.findall('lastPage\":(.*?),', response.text))\n            print(page)\n            return page[0]\n        else:\n            print('failed to request the maximum page count')\n    except Exception as e:\n        print('failed to get the maximum page count, please handle the error')\n        print(e)\n\n# get the number of review pages of the product shop\n\n\n# build the keyword search link\ndef 
get_search_URL(key):\n    #\n    #url='https://list.tmall.com/search_product.htm?q='+str(key)+'&type=p&vmarket=&spm=a2156.1676643.a2227oh.d100&from=mallfp..pc_1_searchbutton&s='+str(page)\n    url = 'https://list.tmall.com/search_product.htm?q='+str(key)+\\\n          '&type=p&vmarket=&spm=a2156.1676643.a2227oh.d100&from=mallfp..pc_1_searchbutton'\n    #print(url)\n    get_productlist(url)\n\n\n\nif __name__ == '__main__':\n    head = strheader()# call the helper to get the https request headers\n    key='手机'# Tmall search keyword\n    URL=get_search_URL(key)# build the search link\n    # shoplist=get_productlist(URL)#\n\n\n", "sub_path": "test/jia666_01.py", "file_name": "jia666_01.py", "file_ext": "py", "file_size_in_byte": 3536, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "re.split", "line_number": 20, "usage_type": "call"}, {"api_name": "re.split", "line_number": 22, "usage_type": "call"}, {"api_name": "fake_useragent.UserAgent", "line_number": 38, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 41, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 65, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "36491148", "text": "from django.urls import path, include\nfrom . import views\n\"\"\"\nadds routes to app\n\"\"\"\nurlpatterns = [\n    \npath('', views.Home, name='home'),\npath('about/', views.About, name='about'),\npath('add/', views.get_Entry, name=\"entry\")\n]\n", "sub_path": "myjournal/entrylog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "138905385", "text": "import json\nimport re\nfrom urllib.error import URLError\n\nimport pymysql\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n# def get_page_code(start_url, *, retry_times=3, charsets=('utf-8',)):\n#     try:\n#         for charset in charsets:\n#             try:\n#                 html = requests.get(start_url).content.decode(charset)\n#                 break\n#             except UnicodeDecodeError:\n#                 html = None\n#     except URLError as e:\n#         print('Error', e)\n#         if retry_times > 0:\n#             return get_page_code(start_url, retry_times=retry_times-1, charsets=charsets)\n#         else:\n#             return None\n#     return html\n\n\ndef main():\n    # list of URLs to visit; a single one here, could be walked with a while loop\n    url_list = ['https://www.zhihu.com/explore']\n    header = {\n        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko)'\n                      ' Chrome/64.0.3282.186 Safari/537.36',\n        'Referer': 'https://www.zhihu.com/',\n        'Host': 'www.zhihu.com'\n    }\n    resp = requests.get(url_list[0], headers=header)\n    html = resp.content.decode('utf-8')\n    conn = pymysql.connect(host='localhost', port=3306,\n                           db='crawler', user='root',\n                           passwd='123456', charset='utf8')\n\n    link_list = []\n    title_list = []\n    content_list = []\n    bs = BeautifulSoup(html, 'lxml')\n    for elem in bs.select('a[class=\"question_link\"]'):\n        link_list.append(elem.attrs['href'])\n        title_list.append(elem.text)\n        resp = requests.get('https://www.zhihu.com'+elem.attrs['href'], headers=header)\n        html = resp.content.decode('utf-8')\n        bs = BeautifulSoup(html, 'lxml')\n        a = 
bs.select('span[class=\"RichText ztext CopyrightRichText-richText\"] p')\n        i_list = ''\n        for i in a:\n            i_list += i.text\n        content_list.append(i_list)\n    for i in range(len(link_list)):\n        with conn.cursor() as cursor:\n            cursor.execute('insert into tb_zhihu(z_url, zquestion,zanswer) values(%s,%s,%s)',\n                           (link_list[i], title_list[i],content_list[i]))\n    conn.commit()\n    conn.close()\n    # while len(url_list) > 0 or len(visited_list) > 1000:\n    #     # pop one from the pending list and add it to the visited list\n    #     current_url = url_list.pop(0)\n    #     visited_list.add(current_url)\n    #     html = requests.get(current_url, headers=header)\n    #     if html:\n    #         # grab the question and the answer content; use a regex to find the h2 title and the text inside p\n    #         link_regex = re.compile()\n\n    # # connect to the database\n    # try:\n    #     pass\n    # finally:\n    #     conn.close()\n    print('execution finished')\n\n\nif __name__ == '__main__':\n    main()", "sub_path": "reptile_study/day1/test/zhihu_spider.py", "file_name": "zhihu_spider.py", "file_ext": "py", "file_size_in_byte": 2837, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 36, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 38, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 45, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "206777158", "text": "import numpy as np\nimport codecs\n\nfrom keras.layers import Activation\nfrom keras.layers import Dense\nfrom keras.layers import SimpleRNN\nfrom keras.models import Sequential\n\n# executed on the parent dir\nINPUT_FILE = './data/alice_in_wonderland.txt'\n\nwith codecs.open(INPUT_FILE, 'r', encoding='utf-8') as f:\n    lines = [line.strip().lower() for line in f if len(line) != 0]\n    text = ' '.join(lines)\n\nchars = set(text)\nnb_chars = len(chars)\nchar2index = dict((c, i) for i, c in enumerate(chars))\nindex2char = dict((i, c) for i, c in enumerate(chars))\n\nSEQLEN = 10\nSTEP = 1\n\n# for example,\n# 'it turned into a pig' ->\n#   (text     , label)\n#   ('it turned ', 'i')\n#   ('t turned i', 'n')\n#   (' turned in', 't')\n#   ...\n#   (' into a pi', 'g')\n\ninput_chars = []\nlabel_chars = []\nfor i in range(0, len(text) - SEQLEN, STEP):\n    input_chars.append(text[i:i + SEQLEN])\n    label_chars.append(text[i + SEQLEN])\n\nx = np.zeros((len(input_chars), SEQLEN, nb_chars), dtype=np.bool)\ny = np.zeros((len(input_chars), nb_chars), dtype=np.bool)\nfor i, input_char in enumerate(input_chars):\n    for j, ch in enumerate(input_char):\n        x[i, j, char2index[ch]] = 1\n\n    y[i, char2index[label_chars[i]]] = 1\n\nHIDDEN_SIZE = 128\nBATCH_SIZE = 128\nNUM_ITERATIONS = 25\nNUM_EPOCHS_PER_ITERATION = 1\nNUM_PREDS_PER_EPOCH = 100\n\nmodel = Sequential()\nmodel.add(SimpleRNN(HIDDEN_SIZE, return_sequences=False,\n                    input_shape=(SEQLEN, nb_chars), unroll=True))\nmodel.add(Dense(nb_chars))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n# training\nfor iteration in range(NUM_ITERATIONS):\n    print('=' * 50)\n    print('Iteration #: {}'.format(iteration))\n    model.fit(x, y, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS_PER_ITERATION)\n    test_index = np.random.randint(len(input_chars))\n    test_chars = input_chars[test_index]\n    print('Generating from seed: {}'.format(test_chars))\n    print(test_chars, end='')\n    for i in range(NUM_PREDS_PER_EPOCH):\n        x_test = np.zeros((1, SEQLEN, nb_chars))\n        for i, ch in enumerate(test_chars):\n            x_test[0, i, char2index[ch]] = 1\n        pred = 
model.predict(x_test, verbose=0)[0]\n        y_pred = index2char[np.argmax(pred)]\n        print(y_pred, end='')\n        # move forward with test_chars + y_pred\n        test_chars = test_chars[1:] + y_pred\n    print('')\n", "sub_path": "work/keras-book/alice.py", "file_name": "alice.py", "file_ext": "py", "file_size_in_byte": 2362, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "codecs.open", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 40, "usage_type": "attribute"}, {"api_name": "keras.models.Sequential", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.SimpleRNN", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "561868900", "text": "import requests\nimport os\nfrom bs4 import BeautifulSoup\nimport concurrent.futures\nMAX_THREADS = 5\n\nclass GGScraper (object):\n    \"\"\" This class is a webscraper for the op.gg website that aims to collect summoner names\n    ranks, and total games played for match data collection.\n    \"\"\"\n\n    def __init__(self, BaseURL, fileOut, region, baseRank):\n        \"\"\" GGScraper Constructor\"\"\"\n\n        self.BaseURL = BaseURL\n        self.file = fileOut\n        self.region = region\n        self.baseRank = ''.join(\n            [i for i in baseRank if not i.isnumeric()]).rstrip().lower()\n        self.rank_array = [\"challenger\", \"grandmaster\", \"master\",\n                           \"diamond\", \"platinum\", \"gold\", \"silver\", \"bronze\", \"iron\"]\n        try:\n            self.target_index = self.rank_array.index(self.baseRank) + 1\n        except ValueError:\n            self.target_index = 3\n            print(\"Rank is not valid. 
Stopping after Master.\")\n self.bad_rank_int = 0\n self.page_tracker = 1\n self.findSummoners()\n\n def process_futures(self, fs, exe):\n \"\"\" Clear futures_done set to allow executor to continue grabbing html from urls \"\"\"\n for future in fs:\n html = future.result()\n if self.bad_rank_int > 600:\n exe.shutdown(wait=False)\n return\n dict_to_write = self.parse_html(html, self.page_tracker)\n if type(dict_to_write) is dict:\n self.write_to_file(dict_to_write)\n del dict_to_write\n self.page_tracker += 1\n fs.clear()\n\n def findSummoners(self):\n \"\"\" Find all summoners on Op.gg \"\"\"\n\n threads = MAX_THREADS\n with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as exe:\n # enforcing an upper-bound limit on threads to keep memory usage low using sets of future sequences\n futures_done = set()\n futures_notdone = set()\n urls = self.generate_urls()\n for url in urls:\n try:\n futures_notdone.add(exe.submit(self.download_html, url))\n # sending exe.shutdown(wait=false) to exit early generates a run-time error\n except RuntimeError:\n return\n if len(futures_notdone) >= MAX_THREADS:\n\n done, futures_notdone = concurrent.futures.wait(\n futures_notdone, return_when=concurrent.futures.FIRST_COMPLETED)\n futures_done.update(done)\n self.process_futures(futures_done, exe)\n\n def download_html(self, url):\n \"\"\" Download HTML from a URL and return\"\"\"\n\n currPage = requests.get(url)\n html = BeautifulSoup(currPage.text, 'lxml')\n return html\n\n def generate_urls(self):\n \"\"\" Generate and return a list of URLs\"\"\"\n\n urls = list()\n for i in range(1, 3000):\n urls.append(self.BaseURL + \"page={}\".format(i))\n return urls\n\n def parse_html(self, html, pagenum):\n \"\"\" Parse HTML for summoner name and rank and return a dictionary\"\"\"\n\n print(\"[+] Parsing Page Number \" +\n str(pagenum) + \"\\tRegion: \" + self.region)\n summoner_dict = {}\n results = html.find_all(\"tr\", attrs={\"class\": \"ranking-table__row\"})\n for result in results:\n try:\n SummonerName = result.find(\n \"td\", attrs={\"class\": \"select_summoner ranking-table__cell ranking-table__cell--summoner\"}).select_one(\"span\").text\n Current_Rank = result.find(\n \"td\", attrs={\"class\": \"ranking-table__cell ranking-table__cell--tier\"}).text.strip().replace(\" \", \"\")\n Summoner_Wins = result.find(\n \"div\", attrs={\"class\": \"winratio-graph__text winratio-graph__text--left\"}).text.strip()\n Summoner_Loses = result.find(\n \"div\", attrs={\"class\": \"winratio-graph__text winratio-graph__text--right\"}).text.strip()\n except AttributeError:\n continue\n Summoner_Total_Games = int(Summoner_Wins) + int(Summoner_Loses)\n # Reached bottom rank, skip summoner\n check_rank = ''.join(\n i for i in Current_Rank if not i.isnumeric()).lower()\n if check_rank not in self.rank_array[:self.target_index]:\n self.bad_rank_int += 1\n pass\n else:\n summoner_dict[SummonerName] = [\n Current_Rank, str(Summoner_Total_Games)]\n return summoner_dict\n\n def write_to_file(self, _dict):\n \"\"\" Write summoner name and summoner rank from a dictionary to .txt file\"\"\"\n\n # Check if File Exists. 
If it doesn't, create one\n        if not os.path.exists(self.file):\n            open(self.file, 'w').close()\n\n        # Write to file only if the summoner does not already exist\n        with open(self.file, \"r+\", encoding=\"utf-8\") as file:\n\n            dict_to_write = _dict\n            for key in dict_to_write.keys():\n                summoner = key\n                rank = dict_to_write[key][0]\n                games = dict_to_write[key][1]\n                full_input = summoner.ljust(\n                    16) + \"\\t\\t\" + rank + \"\\t\\t\" + str(games) + \"\\n\"\n                for line in file:\n                    if summoner.ljust(16) in line and rank in line:\n                        break\n                else:\n                    file.write(full_input)\n                    file.flush()\n", "sub_path": "GGScrape.py", "file_name": "GGScrape.py", "file_ext": "py", "file_size_in_byte": 5555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 49, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 49, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 49, "usage_type": "name"}, {"api_name": "concurrent.futures.futures.wait", "line_number": 62, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 62, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 62, "usage_type": "name"}, {"api_name": "concurrent.futures.futures", "line_number": 63, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 63, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 70, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}]} +{"seq_id": "130789365", "text": "\"\"\"\nCreated on 24 Feb 2021\n\n@author: Bruno Beloff (bruno.beloff@southcoastscience.com)\n\nJSON example:\n{\"pulled-on\": \"2021-02-27T08:37:09Z\", \"success\": true,\n\"installed\": [\"scs_core\", \"scs_dev\", \"scs_dfe_eng\", \"scs_host_cpc\", \"scs_mfr\", \"scs_psu\"],\n\"pulled\": [\"scs_core\", \"scs_dev\", \"scs_dfe_eng\", \"scs_host_cpc\", \"scs_mfr\", \"scs_psu\"],\n\"excluded\": []}\n\"\"\"\n\nfrom collections import OrderedDict\nfrom subprocess import Popen, PIPE, TimeoutExpired\n\nfrom scs_core.data.datetime import LocalizedDatetime\nfrom scs_core.data.json import PersistentJSONable\n\nfrom scs_core.sys.filesystem import Filesystem\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\nclass GitPull(PersistentJSONable):\n    \"\"\"\n    classdocs\n    \"\"\"\n\n    # ----------------------------------------------------------------------------------------------------------------\n\n    __DIR_PREFIX = 'scs_'\n    __EXCLUSIONS = ('scs_exegesis', 'scs_exegesis_modelling', 'scs_experimental', 'scs_inference', 'scs-installer')\n\n    @classmethod\n    def excludes(cls, name):\n        return name in cls.__EXCLUSIONS\n\n\n    @classmethod\n    def dirs(cls, root):\n        items = Filesystem.ls(root)\n\n        if not items:\n            return tuple()\n\n        return tuple(item.name for item in items if item.is_directory and item.name.startswith(cls.__DIR_PREFIX))\n\n\n    @classmethod\n    def is_clone(cls, path):\n        items = Filesystem.ls(path)\n\n        if not items:\n            return False\n\n        return '.git' in [item.name for item in items]\n\n\n    @classmethod\n    def pull_repo(cls, path, timeout):\n        try:\n            p = Popen(['git', '-C', path, 'pull'], stdout=PIPE, stderr=PIPE)\n            stdout_bytes, stderr_bytes = 
p.communicate(timeout=timeout)\n\n success = p.returncode == 0\n stdout = stdout_bytes.decode()\n stderr = stderr_bytes.decode()\n\n return success, stdout, stderr\n\n except TimeoutExpired:\n raise TimeoutError(timeout)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n __FILENAME = \"git_pull.json\"\n\n @classmethod\n def persistence_location(cls):\n return cls.conf_dir(), cls.__FILENAME\n\n\n @classmethod\n def construct_from_jdict(cls, jdict, skeleton=False):\n if not jdict:\n return cls(None, False, [], [], []) if skeleton else None\n\n pulled_on = LocalizedDatetime.construct_from_jdict(jdict.get('pulled-on'))\n success = jdict.get('success')\n\n installed = jdict.get('installed')\n pulled = jdict.get('pulled')\n excluded = jdict.get('excluded')\n\n return cls(pulled_on, success, installed, pulled, excluded)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def __init__(self, pulled_on, success, installed, pulled, excluded):\n \"\"\"\n Constructor\n \"\"\"\n super().__init__()\n\n self.__pulled_on = pulled_on # LocalizedDatetime\n self.__success = success # bool\n\n self.__installed = installed # array of strings\n self.__pulled = pulled # array of strings\n self.__excluded = excluded # array of strings\n\n\n def __eq__(self, other):\n try:\n return self.pulled_on == other.pulled_on and self.success == other.success and \\\n self.installed == other.installed and self.pulled == other.pulled and \\\n self.excluded == other.excluded\n\n except (TypeError, AttributeError):\n return False\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def is_comprehensive(self):\n return self.installed == self.pulled\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def as_json(self):\n jdict = OrderedDict()\n\n jdict['pulled-on'] = None if self.pulled_on is None else self.pulled_on.as_iso8601()\n jdict['success'] = self.success\n\n jdict['installed'] = self.installed\n jdict['pulled'] = self.pulled\n jdict['excluded'] = self.excluded\n\n return jdict\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n @property\n def pulled_on(self):\n return self.__pulled_on\n\n\n @property\n def success(self):\n return self.__success\n\n\n @property\n def installed(self):\n return self.__installed\n\n\n @property\n def pulled(self):\n return self.__pulled\n\n\n @property\n def excluded(self):\n return self.__excluded\n\n\n # ----------------------------------------------------------------------------------------------------------------\n\n def __str__(self, *args, **kwargs):\n return \"GitPull:{pulled_on:%s, success:%s, installed:%s, pulled:%s, excluded:%s}\" % \\\n (self.pulled_on, self.success, self.installed, self.pulled, self.excluded)\n", "sub_path": "src/scs_core/estate/git_pull.py", "file_name": "git_pull.py", "file_ext": "py", "file_size_in_byte": 5221, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "scs_core.data.json.PersistentJSONable", "line_number": 24, "usage_type": "name"}, {"api_name": "scs_core.sys.filesystem.Filesystem.ls", "line_number": 41, "usage_type": "call"}, {"api_name": "scs_core.sys.filesystem.Filesystem", "line_number": 41, "usage_type": "name"}, 
{"api_name": "scs_core.sys.filesystem.Filesystem.ls", "line_number": 51, "usage_type": "call"}, {"api_name": "scs_core.sys.filesystem.Filesystem", "line_number": 51, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 62, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 62, "usage_type": "name"}, {"api_name": "subprocess.TimeoutExpired", "line_number": 71, "usage_type": "name"}, {"api_name": "scs_core.data.datetime.LocalizedDatetime.construct_from_jdict", "line_number": 89, "usage_type": "call"}, {"api_name": "scs_core.data.datetime.LocalizedDatetime", "line_number": 89, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "233098542", "text": "# -*- coding: utf-8 -*-\r\nimport scrapy\r\nfrom ..items import che300_price\r\nimport time\r\nfrom scrapy.conf import settings\r\nfrom scrapy.mail import MailSender\r\nimport logging\r\nfrom hashlib import md5\r\nimport re\r\nimport json\r\nimport datetime\r\n\r\nwebsite = 'che300_city'\r\nclass CarSpider(scrapy.Spider):\r\n name = website\r\n allowed_domains = [\"che300.com\"]\r\n\r\n start_urls=[\r\n 'https://dingjia.che300.com/api/lib/util/city/prov_with_city'\r\n ]\r\n\r\n def __init__(self, **kwargs):\r\n # args\r\n super(CarSpider, self).__init__(**kwargs)\r\n\r\n # problem report\r\n self.mailer = MailSender.from_settings(settings)\r\n self.carnum = 500\r\n self.counts = 0\r\n # Mongo\r\n settings.set('CrawlCar_Num', self.carnum, priority='cmdline')\r\n settings.set('MONGODB_DB', 'usedcar_evaluation', priority='cmdline')\r\n settings.set('MONGODB_COLLECTION', website, priority='cmdline')\r\n\r\n #pro_city select\r\n #brandselect\r\n def parse(self, response):\r\n\r\n data = response.xpath('//p/text()').extract_first()\r\n t = re.findall(r'\\{\\\"prov_name.*?\\}\\]\\}',data)\r\n for i in range(len(t)):\r\n p = json.loads(t[i])\r\n provid = p['prov_id']\r\n provname = p['prov_name']\r\n da = p['data']\r\n # print da\r\n for j in da:\r\n cityname = j['city_name']\r\n cityid = j['city_id']\r\n citycode = j['city_code']\r\n procity = {'provid':provid, 'provname':provname ,'cityid':cityid, 'cityname':cityname}\r\n item = che300_price()\r\n item['provname'] = procity['provname']\r\n item['provid'] = procity['provid']\r\n item['cityname'] = procity['cityname']\r\n item['cityid'] = procity['cityid']\r\n item['url'] = response.url\r\n item['grabtime'] = time.strftime('%Y-%m-%d %X', time.localtime())\r\n item['status'] = md5(response.url+\"-\"+procity['provid']+\"-\"+procity['cityid']).hexdigest()\r\n yield item", "sub_path": "cagey/che300/che300/spiders/che300_region.py", "file_name": "che300_region.py", "file_ext": "py", "file_size_in_byte": 2093, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "scrapy.Spider", "line_number": 14, "usage_type": "attribute"}, {"api_name": "scrapy.mail.MailSender.from_settings", "line_number": 27, "usage_type": "call"}, {"api_name": "scrapy.conf.settings", "line_number": 27, "usage_type": "argument"}, {"api_name": "scrapy.mail.MailSender", "line_number": 27, "usage_type": "name"}, {"api_name": "scrapy.conf.settings.set", "line_number": 31, "usage_type": "call"}, {"api_name": "scrapy.conf.settings", "line_number": 31, "usage_type": "name"}, {"api_name": "scrapy.conf.settings.set", "line_number": 32, "usage_type": "call"}, {"api_name": "scrapy.conf.settings", "line_number": 32, "usage_type": "name"}, {"api_name": "scrapy.conf.settings.set", 
"line_number": 33, "usage_type": "call"}, {"api_name": "scrapy.conf.settings", "line_number": 33, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 40, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "items.che300_price", "line_number": 52, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 58, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 58, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "104274712", "text": "from github import Github\n\nclass GithubWrapper(object):\n def __init__(self, api_key, organization):\n self.api_key = api_key\n self.organization = organization\n\n def list_repositories(self):\n repos = []\n\n client = Github(self.api_key)\n for repo in client.get_organization(self.organization).get_repos():\n repos.append(repo.ssh_url)\n\n return sorted(repos)\n", "sub_path": "lib/github.py", "file_name": "github.py", "file_ext": "py", "file_size_in_byte": 412, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "github.Github", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "439856027", "text": "# coding: utf-8\n\n\"\"\"\nExport file to Parquet format\n\"\"\"\n\nfrom typing import Union\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom ..utils.logging import get_sub_logger\nfrom .common import check_suffix_, check_type_\n\n\nlogger = get_sub_logger('io.parquet')\n\n\nNAME = 'parquet'\nVALID_EXTENSIONS = ('.pqt', '.parquet')\nDEFAULT_EXTENSION = '.parquet'\nDEFAULT_READ_OPTS = dict(engine='pyarrow')\nDEFAULT_WRITE_OPTS = dict(index=False, engine='pyarrow')\nVALID_TYPES = (pd.DataFrame, pd.Series)\n\n\ndef check_suffix(path: Union[Path, str], raise_error: bool=False):\n return check_suffix_(path, VALID_EXTENSIONS, DEFAULT_EXTENSION, raise_error=raise_error)\n\n\ndef check_type(data, raise_error=True):\n return check_type_(data, NAME, VALID_TYPES, raise_error=raise_error)\n\n\ndef write(data: pd.DataFrame, path: Union[str, Path], fix_suffix: bool=True, **kw):\n \"\"\"\n Write dataframe to Parquet\n \"\"\"\n check_type(data)\n path = check_suffix(path, raise_error=not fix_suffix)\n\n if not path.parent.is_dir():\n path.parent.mkdir(parents=True)\n\n opts = dict(DEFAULT_WRITE_OPTS, **kw)\n\n data.to_parquet(path, **opts)\n logger.info(f'data exported to Parquet file: {path}')\n\n return path\n\n\ndef read(path: Union[str, Path], fix_suffix: bool=True, **kw):\n \"\"\"\n Read dataframe from Parquet\n \"\"\"\n path = check_suffix(path, raise_error=not fix_suffix)\n\n opts = dict(DEFAULT_READ_OPTS, **kw)\n\n df = pd.read_parquet(path, **opts)\n logger.info(f'Parquet data loaded from: {path}')\n\n return df", "sub_path": "{{cookiecutter.project_slug}}/{{cookiecutter.app_name}}/io/parquet.py", "file_name": "parquet.py", "file_ext": "py", "file_size_in_byte": 1524, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "utils.logging.get_sub_logger", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 24, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 27, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 27, "usage_type": "name"}, {"api_name": "common.check_suffix_", "line_number": 28, "usage_type": "call"}, 
{"api_name": "common.check_type_", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 35, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 35, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 53, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 53, "usage_type": "name"}, {"api_name": "pandas.read_parquet", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "347420615", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 11 10:55:11 2021\r\n\r\n@author: vidhya\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import norm,expon,binom,poisson\r\n\r\n# Uniform\r\nvalues = np.random.uniform(-10, 10, 100000)\r\nplt.hist(values, 50)\r\nplt.show()\r\n\r\n# GAussian Normal\r\nx = np.arange(-3, 3, 0.01)\r\nplt.plot(x, norm.pdf(x))\r\nplt.show()\r\n\r\n\r\nmean = 5\r\nsigma = 2\r\nnormal = np.random.normal(mean, sigma, 10000)\r\nplt.hist(normal, 50)\r\nplt.show()\r\n\r\n# Exponential\r\nx = np.arange(0, 10, 0.01)\r\nplt.plot(x, expon.pdf(x))\r\nplt.show()\r\n\r\n# binomial\r\nn, p = 10, 0.5\r\nx = np.arange(0, 10, 0.001)\r\nplt.plot(x, binom.pmf(x, n, p))\r\nplt.show()\r\n\r\n# poisson\r\nmu = 500\r\nx = np.arange(400, 600, 0.5)\r\nplt.plot(x, poisson.pmf(x, mu))\r\nplt.show()\r\n\r\n", "sub_path": "Statistics/Distribution.py", "file_name": "Distribution.py", "file_ext": "py", "file_size_in_byte": 769, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.random.uniform", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "scipy.stats.expon.pdf", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.stats.expon", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", 
"line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "scipy.stats.binom.pmf", "line_number": 37, "usage_type": "call"}, {"api_name": "scipy.stats.binom", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "scipy.stats.poisson.pmf", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.stats.poisson", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "410522210", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\n\nclass GaokaowSpider(scrapy.Spider):\n name = 'gaokaow'\n allowed_domains = ['gaokaow.cc']\n start_urls = [f'http://jdzzy.gaokaow.cc/tzySearch/colleges/search?page={page}' for page in range(1, 144)]\n\n def parse(self, response):\n # 提取数据\n selectors = response.xpath('//ul[@class=\"uzy-college-list\"]/li')\n for selector in selectors:\n # url = selector.xpath('.//a[@class=\"name\"]/@href').get()\n college_id = selector.xpath('.//button[@type=\"button\"]/@data-collegeid').get()\n # 获取高考分数的值 先要传送这个网址 855是不变的 只有后面的collegeId是改变的\n url = f'http://jdzzy.gaokaow.cc/Data/ScoreLines/UCodes/QueryList?provinceId=855&collegeId={college_id}'\n\n # 表单构造不成功 没有comment-type ??\n\n # formdata = {\n # 'provinceId': '855',\n # 'collegeId': f'{college_id}'\n # }\n # print(formdata)\n\n school_name = selector.xpath('.//a[@class=\"name\"]/text()').get()\n school_name = school_name.strip() if school_name else ''\n\n li_shu = selector.xpath('.//ul/li[@class=\"quarter_1\"]/text()').get()\n li_shu = ''.join([li.strip() for li in list(li_shu)])\n\n xing_zhi = selector.xpath('.//ul/li[@class=\"quarter_2\"]/text()').get()\n xing_zhi = ''.join([xing.strip() for xing in list(xing_zhi)])\n\n pai_ming = selector.xpath('.//div[@class=\"rank\"]//span/text()').get()\n pai_ming = pai_ming.strip() if pai_ming else ''\n\n item = {\n 'school_name': school_name,\n 'li_shu': li_shu,\n 'xing_zhi': xing_zhi,\n 'pai_ming': pai_ming,\n }\n\n yield scrapy.FormRequest(url, method='POST', callback=self.parse_extract, meta=item)\n\n # 提取 uCodeNum\n def parse_extract(self, response):\n\n item = response.meta\n\n # school_name = response.meta.get('school_name')\n # li_shu = response.meta.get('li_shu')\n # xing_zhi = response.meta.get('xing_zhi')\n # pai_ming = response.meta.get('pai_ming')\n\n json_data = json.loads(response.text)\n if json_data.get('result'):\n for data in json_data['result']:\n # provinceName = data.get('provinceName')\n uCodeNum = data.get('uCodeNum')\n collegeName = data.get('collegeName')\n admissCode = data.get('admissCode')\n # collegeId = data.get('collegeId')\n # 获取高考分数\n url = f'http://jdzzy.gaokaow.cc/Data/ScoreLines/Fractions/Professions/Query'\n\n item.update(\n {\n # 'provinceName': provinceName,\n # 'uCodeNum': uCodeNum,\n 'collegeName': 
collegeName,\n                        'admissCode': admissCode,\n                        # 'collegeId': collegeId,\n                    }\n                )\n\n                for year in range(2016, 2019):  # control the range of years\n                    for courseType in range(0, 2):\n                        form_data = {\n                            'batch': '0',\n                            'courseType': f'{courseType}',\n                            'uCode': f\"{uCodeNum}\",\n                            'yearFrom': f'{year}',\n                            'yearTo': f'{year}',\n                        }\n                        yield scrapy.FormRequest(url, method='POST', formdata=form_data, callback=self.parse_detail, meta=item)\n\n\n    def parse_detail(self, response):\n\n        item = response.meta\n\n        json_data = json.loads(response.text)\n        if json_data.get('result'):\n            for data in json_data['result']:\n                maxScore = data.get('maxScore')  # highest score\n                minScore = data.get('minScore')  # lowest score\n                avgScore = data.get('avgScore')  # average score\n                lowSort = data.get('lowSort')  # lowest admitted rank\n                enterNum = data.get('enterNum')  # number admitted\n                year = data.get('year')  # year\n                courseType = data.get('courseType')  # 0 --> science track, 1 --> liberal arts track\n                professionCode = data.get('professionCode')  # major code\n                professionName = data.get('professionName')  # major name\n                remarks = data.get('remarks')  # remarks (appended to the major name)\n                professionName = professionName + remarks\n                batchName = data.get('batchName')  # admission batch\n\n                # item.update(\n                #     {\n                #         'maxScore':maxScore,\n                #         'minScore':minScore,\n                #         'enterNum':enterNum,\n                #         'batchName':batchName,\n                #         'year':year,\n                #         'course':course,\n                #     }\n                # )\n\n                all_data = [item.get(\"collegeName\"),item.get(\"li_shu\"),item.get(\"xing_zhi\"),item.get(\"pai_ming\"),year,professionCode,professionName,courseType,batchName,maxScore,avgScore,minScore,lowSort,enterNum]\n\n                items = {\n                    'all_data': all_data\n                }\n                yield items", "sub_path": "2019-05-19-金点子志愿/金点子详细版本/gaokaowSpider/gaokaowSpider/spiders/gaokaow.py", "file_name": "gaokaow.py", "file_ext": "py", "file_size_in_byte": 5309, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "scrapy.Spider", "line_number": 6, "usage_type": "attribute"}, {"api_name": "scrapy.FormRequest", "line_number": 47, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 59, "usage_type": "call"}, {"api_name": "scrapy.FormRequest", "line_number": 89, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "289143736", "text": "from flask import Flask, request, render_template\nimport os\nimport urllib.request\nimport ssl\nimport re\nimport csv\nimport sys\nimport pathlib\nimport psycopg2\nimport json\nparent_dir = str(pathlib.Path(__file__).parent.parent.resolve())\nsys.path.append(parent_dir)\nfrom common.c_debug import logger\nfrom common.c_ping import Pings\nfrom device.a_displayer import ApiDisplayer\n\n\nconfig_type = {\n    \"development\": \"config.Development\",\n    \"production\": \"config.Production\",\n    \"testing\": \"config.Testing\"\n}\n\n\napp = Flask(__name__, instance_relative_config=True)\napp.config.from_object(config_type.get(os.getenv(\"FLASK_APP_ENV\", \"production\")))\napp.config.from_pyfile('config.cfg')\n\n\ndef targets_csv_path():\n    return '/home/pi/syateki_center_server/web/targets.csv'\n\n\ndef set_target():\n    hosts = []\n    results = []\n    for n in range(12):\n        hosts.append(\"192.168.100.\" + str(200 + n))\n    results = Pings().scan(hosts)\n    ApiDisplayer().disp_connectivity(results)\n    targets = []\n    for i, r in enumerate(results):\n        if r:\n            targets.append(\"192.168.100.\" + str(200 + i))\n    logger().info('targets = %s', targets)\n    with open(targets_csv_path(), 'w') as f:\n        writer = csv.writer(f)\n        writer.writerow(targets)\n\n\ndef get_target_num(response):\n    return re.search(r'(?<==)\\d+', response).group()\n\n\n
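# Each target's HTTP response encodes the number of the gun that last hit it;\n# get_hit_num polls every target in order and returns the 1-based index of the\n# target that reports this gun number, or -1 if no target matched.\n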
def get_hit_num(targets, gun_num):\n    ssl._create_default_https_context = ssl._create_unverified_context\n    logger().info('targets = %s', targets)\n    #if not targets:\n    #    return (-1)\n    for i, t in enumerate(targets):\n        logger().info('connect to target: ' + str(i))\n        # url_target = 'http://' + t\n        url_target = 'http://' + t + '?gun_num=' + gun_num\n        req = urllib.request.Request(url_target)\n        target_num = '0'\n        with urllib.request.urlopen(req) as res:\n            res_html = res.read().decode('utf-8')\n            # print('score_site res = ' + res_html)\n            target_num = get_target_num(res_html)\n            # logger().info('target_num = ' + target_num)\n            if target_num == gun_num:\n                logger().info('hit_num = ' + str(i + 1))\n                return (i + 1)\n    return (-1)\n\n\ndef init_targets(targets):\n    ssl._create_default_https_context = ssl._create_unverified_context\n    for i, t in enumerate(targets):\n        logger().info('initialize target: ' + str(i))\n        # url_target = 'http://' + t\n        url_target = 'http://' + t + '/init'\n        req = urllib.request.Request(url_target)\n        with urllib.request.urlopen(req) as res:\n            res_html = res.read().decode('utf-8')\n            logger().info('init response: ' + res_html)\n    return\n\n\ndef get_connection():\n    return psycopg2.connect(database=app.config['DB_NAME'],\n                            user=app.config['DB_USER'],\n                            password=app.config['DB_PASSWORD'],\n                            host=app.config['DB_HOST'],\n                            port=app.config['DB_PORT'])\n\n\ndef regist_score(time, score):\n    conn = get_connection()\n    cur = conn.cursor()\n    cur.execute(\"INSERT INTO scores (time, score) VALUES (%s, %s) RETURNING id\", (time, score))\n    id = cur.fetchone()\n    conn.commit()\n    cur.close()\n    conn.close()\n    logger().info('registered id = ' + str(id[0]))\n    return id[0]\n\n\ndef get_rank(id):\n    conn = get_connection()\n    cur = conn.cursor()\n    cur.execute(\"SELECT score_rank FROM (SELECT *, RANK() OVER(ORDER BY score DESC, time ASC) AS score_rank FROM scores) AS grade WHERE id = %s\", (id,))\n    rank = cur.fetchone()\n    cur.close()\n    conn.close()\n    logger().info('rank = ' + str(rank))\n    return rank[0]\n\n\ndef get_records_num():\n    conn = get_connection()\n    cur = conn.cursor()\n    cur.execute(\"SELECT COUNT(id) FROM scores\")\n    count = cur.fetchone()\n    cur.close()\n    conn.close()\n    logger().info('count = ' + str(count))\n    return count[0]\n\n# id = 1 ~ 10\n@app.route(\"/shoot/<id>\", methods=[\"GET\"])\ndef get_shoot(id='1'):\n    if request.method == \"GET\":\n        targets = []\n        with open(targets_csv_path()) as f:\n            reader = csv.reader(f)\n            targets = next(reader)\n        hit_num = get_hit_num(targets, id)\n        conn = get_connection()\n        cur = conn.cursor()\n        cur.execute(\"UPDATE current_score SET bullet = bullet - 1 WHERE id = %s\", (id,))\n        if hit_num > 0:\n            cur.execute(\"UPDATE current_score SET point = point + 1 WHERE id = %s\", (id,))\n        conn.commit()\n        cur.close()\n        conn.close()\n        return str(hit_num)\n\n# id = 1 ~ 10\n@app.route(\"/score/<id>\", methods=[\"GET\"])\ndef get_score(id='1'):\n    if request.method == \"GET\":\n        conn = get_connection()\n        cur = conn.cursor()\n        cur.execute(\"SELECT * FROM current_score WHERE id = %s\", (id,))\n        score = cur.fetchone()\n        cur.close()\n        conn.close()\n        dic = {'point': score[1], 'bullet': score[2]}\n        return json.dumps(dic)\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef root():\n    player_num = request.args.get(\"player_num\", 1)\n    return render_template('index.html', player_num=int(player_num))\n\n\n@app.route(\"/result\", methods=[\"GET\"])\ndef show_result():\n    player_num = int(request.args.get(\"player_num\", 1))\n    scores = []\n    for i in range(player_num):\n        
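# each player's time and score arrive as query parameters (\"time0\"/\"score0\", ...);\n        # the -1 defaults below flag missing values\n        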
time = float(request.args.get(\"time\" + str(i), -1.0))\n score = int(request.args.get(\"score\" + str(i), -1))\n id = regist_score(time, score)\n rank = get_rank(id)\n scores.append({'time': time, 'score': score, 'rank': rank})\n count = get_records_num()\n return render_template('result.html', player_num=player_num, scores=scores, count=count)\n\n\n@app.route(\"/init\", methods=[\"GET\"])\ndef init_score():\n conn = get_connection()\n cur = conn.cursor()\n cur.execute(\"UPDATE current_score SET bullet = 20, point = 0\")\n conn.commit()\n cur.close()\n conn.close()\n \n targets = []\n with open(targets_csv_path()) as f:\n reader = csv.reader(f)\n targets = next(reader)\n init_targets(targets)\n \n return \"OK\"\n\n\nif __name__ == \"__main__\":\n set_target()\n app.run(\"0.0.0.0\")\n", "sub_path": "web/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6178, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pathlib.Path", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 25, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 26, "usage_type": "call"}, {"api_name": "common.c_ping.Pings", "line_number": 39, "usage_type": "call"}, {"api_name": "device.a_displayer.ApiDisplayer", "line_number": 40, "usage_type": "call"}, {"api_name": "common.c_debug.logger", "line_number": 45, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 47, "usage_type": "call"}, {"api_name": "re.search", "line_number": 52, "usage_type": "call"}, {"api_name": "ssl._create_default_https_context", "line_number": 56, "usage_type": "attribute"}, {"api_name": "ssl._create_unverified_context", "line_number": 56, "usage_type": "attribute"}, {"api_name": "common.c_debug.logger", "line_number": 57, "usage_type": "call"}, {"api_name": "common.c_debug.logger", "line_number": 61, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 64, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 64, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 64, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 66, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 66, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 66, "usage_type": "name"}, {"api_name": "common.c_debug.logger", "line_number": 72, "usage_type": "call"}, {"api_name": "ssl._create_default_https_context", "line_number": 78, "usage_type": "attribute"}, {"api_name": "ssl._create_unverified_context", "line_number": 78, "usage_type": "attribute"}, {"api_name": "common.c_debug.logger", "line_number": 80, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 83, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 83, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 83, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 84, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 84, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 84, "usage_type": "name"}, {"api_name": "common.c_debug.logger", "line_number": 86, "usage_type": "call"}, {"api_name": "psycopg2.connect", 
"line_number": 91, "usage_type": "call"}, {"api_name": "common.c_debug.logger", "line_number": 106, "usage_type": "call"}, {"api_name": "common.c_debug.logger", "line_number": 118, "usage_type": "call"}, {"api_name": "common.c_debug.logger", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 135, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 135, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 138, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 154, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 154, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 167, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 167, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 173, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 173, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 173, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 176, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 176, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 176, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 177, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 177, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 177, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 182, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "312544970", "text": "#-*-coding:utf-8-*-\nimport json\nimport numpy as np\nfrom copy import deepcopy\nimport pickle\nimport time\nfrom scipy.sparse import csr_matrix as csr\n\ndef load_jsondata_from_file(path, ftype=None):\n \"\"\"\n return data are index data\\\\\n filtering result should be combined with the original json files\n \"\"\"\n print(\"loading %s\" % path)\n t0 = time.time()\n data = []\n with open(path, 'r') as f:\n if ftype == None:\n for line in f:\n item = json.loads(line)\n data.append(item)\n elif ftype == 'user':\n for line in f:\n item = json.loads(line)\n data.append({'user_id': item['user_id'], 'friends': item['friends']})\n elif ftype == 'business':\n for line in f:\n item = json.loads(line)\n data.append({'business_id': item['business_id'], 'categories': item['categories'], 'city': item['city']})\n elif ftype == 'review':\n for line in f:\n item = json.loads(line)\n data.append({'user_id': item['user_id'], 'business_id': item['business_id'], 'stars': item['stars']})\n print(\"loading %s done, time cost %.2f\" % (path, time.time()-t0))\n return data\n\ndef load_pickle(path):\n with open(path, 'rb') as fr:\n data = pickle.load(fr)\n return data\n\ndef get_id_to_ind(json_datas, filtered_list, filtered_name, id_name, multi_value):\n \"\"\"\n first appear position\n id for data_id, ind for index\n the same as set() and dict()\n \"\"\"\n ind2id = {}\n id2ind = {}\n tot = 0\n for data in json_datas:\n if data[filtered_name] not in filtered_list:\n continue\n if multi_value:\n # this get a list of data_id(str) with no spaces in the beginning or end\n data_ids = 
[data_id.strip() for data_id in data[id_name].split(',')] # strip to remove spaces at the beginning or end\n        else:\n            data_ids = [data[id_name]]\n        for data_id in data_ids:\n            if data_id not in id2ind:\n                ind2id[tot] = data_id # input ind, output id\n                id2ind[data_id] = tot # input id, output ind\n                tot = tot + 1\n    return ind2id, id2ind\n\ndef divide_rate(review_train):\n    \"\"\"\n    divide 1~5 rates into pos and neg classes\n    <= 3: neg, >= 4: pos\n    \"\"\"\n    pos_reviews = []\n    neg_reviews = []\n    for review in review_train:\n        if review['rate'] > 3.0:\n            pos_reviews.append([review['user_id'], review['business_id'], review['rate']])\n        elif review['rate'] <= 3.0:\n            neg_reviews.append([review['user_id'], review['business_id'], review['rate']])\n    return pos_reviews, neg_reviews\n\ndef dataset_split(reviews, uid2ind, bid2ind, train_ratio, valid_ratio, test_ratio):\n    \"\"\"\n    split the dataset into train, valid, and test sets according to train_ratio, valid_ratio, and test_ratio\n    \"\"\"\n    selected_reviews = []\n    for review in reviews:\n        if (review['user_id'] not in uid2ind) or (review['business_id'] not in bid2ind):\n            continue\n        filtered_review = {}\n        filtered_review['user_id'] = uid2ind[review['user_id']]\n        filtered_review['business_id'] = bid2ind[review['business_id']]\n        filtered_review['rate'] = int(review['stars'])\n        selected_reviews.append(filtered_review)\n\n    n_reviews = len(selected_reviews) # number of selected reviews\n    test_indices = np.random.choice(range(n_reviews), size=int(n_reviews*test_ratio), replace=False) # randomly choose some indices as test set\n\n    left = set(range(n_reviews)) - set(test_indices)\n    n_left = len(left)\n\n    valid_indices = np.random.choice(list(left), size=int(n_left*valid_ratio), replace=False)\n    train_indices = list(left - set(valid_indices))\n\n    train_data = [selected_reviews[index] for index in train_indices]\n    valid_data = [selected_reviews[index] for index in valid_indices]\n    test_data = [selected_reviews[index] for index in test_indices]\n    return train_data, valid_data, test_data\n\ndef get_adj_matrix(uid2ind, bid2ind, city_id2ind, cat_id2ind, users, businesses, pos_reviews, neg_reviews):\n    \"\"\"\n    metapaths: UPB, UNB, UUPB, UUNB, UPBUB, UNBUB, UPBCaB, UNBCaB, UPBCiB, UNBCiB\n    pos metapaths use only positive-rating edges; neg metapaths only negative ones\n    \"\"\"\n    tot_users = len(uid2ind) # tot for total\n    tot_business = len(bid2ind)\n    tot_city = len(city_id2ind)\n    tot_category = len(cat_id2ind)\n    # relation U-U\n    adj_UU = np.zeros([tot_users, tot_users])\n    adj_UPB = np.zeros([tot_users, tot_business])\n    adj_UNB = np.zeros([tot_users, tot_business])\n    adj_BCa = np.zeros([tot_business, tot_category])\n    adj_BCi = np.zeros([tot_business, tot_city])\n    for user in users:\n        if user['user_id'] not in uid2ind:\n            continue\n        user_id = uid2ind[user['user_id']]\n        for friend in user['friends'].split(','):\n            friend = friend.strip()\n            if friend in uid2ind:\n                friend_id = uid2ind[friend]\n                adj_UU[user_id][friend_id] = 1\n                adj_UU[friend_id][user_id] = 1\n    # relation U-P-B\n    for review in pos_reviews:\n        user_id = review[0]\n        business_id = review[1]\n        adj_UPB[user_id][business_id] = 1\n    # relation U-N-B\n    for review in neg_reviews:\n        user_id = review[0]\n        business_id = review[1]\n        adj_UNB[user_id][business_id] = 1\n    # relation B-Ca, B-Ci\n    for business in businesses:\n        if business['business_id'] not in bid2ind:\n            continue\n        business_id = bid2ind[business['business_id']]\n        city_id = city_id2ind[business['city']]\n        adj_BCi[business_id][city_id] = 1\n        for category in business['categories'].split(','):\n            
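# categories is a comma-separated string; strip the whitespace around\n            # each name before looking up its index\n            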
category = category.strip()\n category_id = cat_id2ind[category]\n adj_BCa[business_id][category_id] = 1\n\n #metapath\n adj_UUPB = adj_UU.dot(adj_UPB)\n adj_UUNB = adj_UU.dot(adj_UNB)\n\n adj_UPBU = adj_UPB.dot(adj_UPB.T)\n adj_UNBU = adj_UNB.dot(adj_UNB.T)\n\n adj_UPBUB = adj_UPBU.dot(adj_UPB)\n adj_UNBUB = adj_UNBU.dot(adj_UNB)\n\n adj_UPBCa = adj_UPB.dot(adj_BCa)\n adj_UPBCaB = adj_UPBCa.dot(adj_BCa.T)\n\n adj_UNBCa = adj_UNB.dot(adj_BCa)\n adj_UNBCaB = adj_UNBCa.dot(adj_BCa.T)\n\n adj_UPBCi = adj_UPB.dot(adj_BCi)\n adj_UPBCiB = adj_UPBCi.dot(adj_BCi.T)\n\n adj_UNBCi = adj_UNB.dot(adj_BCi)\n adj_UNBCiB = adj_UNBCi.dot(adj_BCi.T)\n\n return adj_UPB, adj_UNB, adj_UUPB, adj_UUNB, adj_UPBUB, adj_UNBUB, adj_UPBCaB, adj_UNBCaB, adj_UPBCiB, adj_UNBCiB\n\n\nif __name__ == \"__main__\":\n filtered_user = load_pickle('filtered/users.pickle')\n filtered_business = load_pickle('filtered/businesses.pickle')\n filtered_reviews = load_pickle('filtered/reviews.pickle') # success!\n\n user_json = load_jsondata_from_file('json/yelp_academic_dataset_user.json') # 25s 60.5s new\n business_json = load_jsondata_from_file('json/yelp_academic_dataset_business.json') # 4.45s 7.62s new\n review_json = load_jsondata_from_file('json/yelp_academic_dataset_review.json') # 69.8s 237.67s new \n\n ind2uid, uid2ind = get_id_to_ind(user_json, filtered_user, 'user_id', 'user_id', False)\n ind2bid, bid2ind = get_id_to_ind(business_json, filtered_business, 'business_id', 'business_id', False)\n ind2city_id, city_id2ind = get_id_to_ind(business_json, filtered_business, 'business_id', 'city', False)\n ind2cat_id, cat_id2ind = get_id_to_ind(business_json, filtered_business, 'business_id', 'categories', True)\n print(\"user_id2ind: %s\" % len(uid2ind))\n print(\"business_id2ind: %s\" % len(bid2ind))\n print(\"city_id2ind: %s\" % len(city_id2ind))\n print(\"category_id2ind: %s\" % len(cat_id2ind))\n\n r = (ind2uid, ind2bid, ind2city_id, ind2cat_id)\n r_names = ('ind2uid', 'ind2bid', 'ind2city_id', 'ind2cat_id')\n\n for i in range(len(r)):\n with open('adjs/' + r_names[i], 'wb') as f:\n pickle.dump(r[i], f, protocol=4)\n\n # dataset split\n review_train, review_valid, review_test = dataset_split(filtered_reviews, uid2ind, bid2ind, 0.8, 0.1, 0.2)\n # in review_train: {'rate':number, 'user_id':number, 'business_id':number}\n # train valid test data save\n print(\"generating ratings dataset\")\n d = (review_train, review_valid, review_test)\n d_names = ('ratings_train_1', 'ratings_valid_1', 'ratings_test_1')\n for i in range(len(d)):\n with open('rates/' + d_names[i] + '.txt', 'w') as f:\n for item in d[i]:\n f.write(str(item['user_id'])+' '+str(item['business_id'])+' '+str(item['rate'])+'\\n')\n\n # train data divide rate\n pos_reviews, neg_reviews = divide_rate(review_train)\n\n # cal adjacent matrices\n adj_UPB, adj_UNB, adj_UUPB, adj_UUNB, adj_UPBUB, adj_UNBUB, adj_UPBCaB, adj_UNBCaB, adj_UPBCiB, adj_UNBCiB = \\\n get_adj_matrix(uid2ind, bid2ind, city_id2ind, cat_id2ind, user_json, business_json, pos_reviews, neg_reviews)\n\n # relation save\n t = (adj_UPB, adj_UNB, adj_UUPB, adj_UUNB, adj_UPBUB, adj_UNBUB, adj_UPBCaB, adj_UNBCaB, adj_UPBCiB, adj_UNBCiB)\n t_names = ('adj_UPB', 'adj_UNB', 'adj_UUPB', 'adj_UUNB', 'adj_UPBUB', 'adj_UNBUB', 'adj_UPBCaB', 'adj_UNBCaB', 'adj_UPBCiB', 'adj_UNBCiB')\n for i in range(len(t)):\n with open('adjs/' + t_names[i] + '.res', 'w') as f:\n for uid, line in enumerate(t[i]):\n for bid, num in enumerate(line):\n if num != 0:\n write_str = '%d %d %.1f\\n' % (uid, bid, num)\n f.write(write_str)\n\n 
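# a minimal, hypothetical sketch for the TODO below: rescale 1..5 star\n    # rates onto 0..1 before writing, e.g. norm_rate = (rate - 1.0) / 4.0\n    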
# TODO: modify rate to 0-1", "sub_path": "yelp_dataset/data_gen.py", "file_name": "data_gen.py", "file_ext": "py", "file_size_in_byte": 9612, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "time.time", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 100, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 122, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 203, "usage_type": "call"}]} +{"seq_id": "30760061", "text": "import pymysql\n\nclass DataBase:\n def __init__(self, dbName):\n self.alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n self.numbers = \"1234567890\"\n self.server = pymysql.connect(host=\"localhost\", user=\"root\", password=\"\", db=dbName)\n\n def validateUser(self, username, givenHash):\n if not self.isValid(username, givenHash):\n return False\n try:\n with self.server.cursor() as cursor:\n query = \"SELECT hash FROM users WHERE username = %s\"\n cursor.execute(query, (str(username)))\n respond = cursor.fetchone()[0]\n print(respond + \" =? 
\" + givenHash)\n if respond == givenHash:\n return True\n else:\n return False\n except TypeError:\n return False\n def getUser(self, username):\n if not self.isUsernameValid(username):\n return {}\n try:\n with self.server.cursor() as cursor:\n query = \"SELECT id, username FROM users WHERE username = %s\"\n cursor.execute(query, (str(username)))\n respond = cursor.fetchone()\n print(respond)\n # (\"id\", \"username\")\n return {\n \"id\": respond[0],\n \"username\": respond[1]\n }\n except TypeError:\n return {}\n def addUser(self, username, givenHash):\n if not self.isValid(username, givenHash):\n return False\n try:\n with self.server.cursor() as cursor:\n query = \"INSERT INTO users (username, hash) VALUES (%s, %s)\"\n cursor.execute(query, (str(username), str(givenHash)))\n self.server.commit()\n return True\n except Exception as e:\n print(\"Error while adding a user error occurred: \", e)\n return False\n\n def isValid(self, username, givenHash):\n return self.isUsernameValid(username) and self.isHashValid(givenHash)\n\n def isUsernameValid(self, username):\n if not isinstance(username, str):\n return False\n acceptedCharacters = self.alphabet + self.alphabet.upper() + self.numbers\n temp = username\n for character in acceptedCharacters:\n temp = temp.replace(character, \"\")\n return temp == \"\"\n\n def isHashValid(self, givenHash):\n if not isinstance(givenHash, str):\n return False\n acceptedCharacters = self.alphabet + self.alphabet.upper() + self.numbers\n temp = givenHash\n for character in acceptedCharacters:\n temp = temp.replace(character, \"\")\n return temp == \"\"", "sub_path": "app/database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 2667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pymysql.connect", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "69109278", "text": "from __future__ import absolute_import, unicode_literals\nimport urllib.request\nimport os\nimport shutil\nfrom typing import List\nfrom contextlib import closing\nfrom data_refinery_common.models import File, DownloaderJob\nfrom data_refinery_workers.downloaders import utils\nfrom data_refinery_common.logging import get_and_configure_logger\n\n\nlogger = get_and_configure_logger(__name__)\n\n\n# chunk_size is in bytes\nCHUNK_SIZE = 1024 * 256\n\n\ndef _verify_files(file1: File, file2: File, job: DownloaderJob) -> None:\n \"\"\"Verifies that the two files are the same.\n\n This is useful for this downloader because each job has two\n batches which should each have the same two files.\n \"\"\"\n if file1.download_url != file2.download_url:\n failure_message = (\"A Batch's file doesn't have the same download \"\n \"URL as the other batch's file.\")\n logger.error(failure_message,\n downloader_job=job.id)\n job.failure_reason = failure_message\n raise ValueError(failure_message)\n\n\ndef _download_file(download_url: str, file_path: str, job: DownloaderJob) -> None:\n failure_template = \"Exception caught while downloading file from: %s\"\n try:\n logger.debug(\"Downloading file from %s to %s.\",\n download_url,\n file_path,\n downloader_job=job.id)\n urllib.request.urlcleanup()\n target_file = open(file_path, \"wb\")\n with closing(urllib.request.urlopen(download_url)) as request:\n shutil.copyfileobj(request, target_file, CHUNK_SIZE)\n\n # Ancient unresolved bug. 
WTF python: https://bugs.python.org/issue27973\n urllib.request.urlcleanup()\n except Exception:\n logger.exception(failure_template,\n download_url,\n downloader_job=job.id)\n job.failure_reason = failure_template % download_url\n raise\n finally:\n target_file.close()\n\n\ndef _upload_files(job_dir: str, files: List[File], job: DownloaderJob) -> None:\n try:\n for file in files:\n file.size_in_bytes = os.path.getsize(file.get_temp_pre_path(job_dir))\n file.save()\n file.upload_raw_file(job_dir)\n except Exception:\n logger.exception(\"Exception caught while uploading file.\",\n downloader_job=job.id,\n batch=file.batch.id)\n job.failure_reason = \"Exception caught while uploading file.\"\n raise\n finally:\n file.remove_temp_directory(job_dir)\n\n\ndef download_transcriptome(job_id: int) -> None:\n \"\"\"The main function for the Transcriptome Index Downloader.\n\n Two files are needed for the Transcriptome Index Downloader: a\n fasta file and a gtf file. However each pair need to be processed\n into two different sized indices. (See the\n processors.transcriptome_index._create_index function's docstring\n for more info.) Therefore we only download each set once, then\n push it to Temporary Storage twice.\n \"\"\"\n job = utils.start_job(job_id)\n batches = job.batches.all()\n success = True\n job_dir = utils.JOB_DIR_PREFIX + str(job_id)\n\n try:\n first_fasta_file = File.objects.get(batch=batches[0], raw_format__exact=\"fa.gz\")\n first_gtf_file = File.objects.get(batch=batches[0], raw_format__exact=\"gtf.gz\")\n second_fasta_file = File.objects.get(batch=batches[1], raw_format__exact=\"fa.gz\")\n second_gtf_file = File.objects.get(batch=batches[1], raw_format__exact=\"gtf.gz\")\n os.makedirs(first_fasta_file.get_temp_dir(job_dir), exist_ok=True)\n except Exception:\n logger.exception(\"Failed to retrieve all expected files from database.\",\n downloader_job=job.id)\n job.failure_reason = \"Failed to retrieve all expected files from database.\"\n success = False\n\n if success:\n try:\n _verify_files(first_fasta_file, second_fasta_file, job)\n _verify_files(first_gtf_file, second_gtf_file, job)\n\n # The two Batches share the same fasta and gtf files, so\n # only download each one once\n _download_file(first_fasta_file.download_url,\n first_fasta_file.get_temp_pre_path(job_dir),\n job)\n _download_file(first_gtf_file.download_url,\n first_gtf_file.get_temp_pre_path(job_dir),\n job)\n\n # Then create symlinks so the files for the second Batch\n # can be found where they will be expected to.\n try:\n os.symlink(first_fasta_file.get_temp_pre_path(job_dir),\n second_fasta_file.get_temp_pre_path(job_dir))\n os.symlink(first_gtf_file.get_temp_pre_path(job_dir),\n second_gtf_file.get_temp_pre_path(job_dir))\n except Exception:\n logger.exception(\"Exception caught while creating symlinks.\",\n downloader_job=job.id)\n job.failure_reason = \"Exception caught while creating symlinks.\"\n raise\n\n _upload_files(job_dir,\n [first_fasta_file, first_gtf_file, second_fasta_file, second_gtf_file],\n job)\n except Exception:\n # Exceptions are already logged and handled.\n # Just need to mark the job as failed.\n success = False\n\n if success:\n logger.debug(\"Files %s and %s downloaded successfully.\",\n first_fasta_file,\n first_gtf_file,\n downloader_job=job_id)\n\n utils.end_job(job, batches, success)\n", "sub_path": "workers/data_refinery_workers/downloaders/transcriptome_index.py", "file_name": "transcriptome_index.py", "file_ext": "py", "file_size_in_byte": 5751, "program_lang": "python", 
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "data_refinery_common.logging.get_and_configure_logger", "line_number": 12, "usage_type": "call"}, {"api_name": "data_refinery_common.models.File", "line_number": 19, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 19, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 34, "usage_type": "name"}, {"api_name": "urllib.request.request.urlcleanup", "line_number": 41, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 41, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 41, "usage_type": "name"}, {"api_name": "contextlib.closing", "line_number": 43, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 43, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 43, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 43, "usage_type": "name"}, {"api_name": "shutil.copyfileobj", "line_number": 44, "usage_type": "call"}, {"api_name": "urllib.request.request.urlcleanup", "line_number": 47, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 47, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 58, "usage_type": "name"}, {"api_name": "data_refinery_common.models.File", "line_number": 58, "usage_type": "name"}, {"api_name": "data_refinery_common.models.DownloaderJob", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.getsize", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "data_refinery_workers.downloaders.utils.start_job", "line_number": 84, "usage_type": "call"}, {"api_name": "data_refinery_workers.downloaders.utils", "line_number": 84, "usage_type": "name"}, {"api_name": "data_refinery_workers.downloaders.utils.JOB_DIR_PREFIX", "line_number": 87, "usage_type": "attribute"}, {"api_name": "data_refinery_workers.downloaders.utils", "line_number": 87, "usage_type": "name"}, {"api_name": "data_refinery_common.models.File.objects.get", "line_number": 90, "usage_type": "call"}, {"api_name": "data_refinery_common.models.File.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.File", "line_number": 90, "usage_type": "name"}, {"api_name": "data_refinery_common.models.File.objects.get", "line_number": 91, "usage_type": "call"}, {"api_name": "data_refinery_common.models.File.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.File", "line_number": 91, "usage_type": "name"}, {"api_name": "data_refinery_common.models.File.objects.get", "line_number": 92, "usage_type": "call"}, {"api_name": "data_refinery_common.models.File.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.File", "line_number": 92, "usage_type": "name"}, {"api_name": "data_refinery_common.models.File.objects.get", "line_number": 93, "usage_type": "call"}, {"api_name": "data_refinery_common.models.File.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "data_refinery_common.models.File", "line_number": 93, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 94, "usage_type": "call"}, {"api_name": "os.symlink", "line_number": 118, "usage_type": "call"}, 
{"api_name": "os.symlink", "line_number": 120, "usage_type": "call"}, {"api_name": "data_refinery_workers.downloaders.utils.end_job", "line_number": 142, "usage_type": "call"}, {"api_name": "data_refinery_workers.downloaders.utils", "line_number": 142, "usage_type": "name"}]} +{"seq_id": "56218256", "text": "import torch\nfrom torch import nn\n\nclass SH_SelfAttention(nn.Module):\n \"\"\" single head self-attention module\n \"\"\"\n def __init__(self, input_size):\n \n super().__init__()\n # define query, key and value transformation matrices\n # usually input_size is equal to embed_size\n self.embed_size = input_size\n self.Wq = nn.Linear(input_size, self.embed_size, bias=False)\n self.Wk = nn.Linear(input_size, self.embed_size, bias=False)\n self.Wv = nn.Linear(input_size, self.embed_size, bias=False)\n self.softmax = nn.Softmax(dim=2) # normalized across feature dimension\n \n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, (batch, ddi similarity type vector, input_size)\n \"\"\"\n X_q = self.Wq(X) # queries\n X_k = self.Wk(X) # keys\n X_v = self.Wv(X) # values\n \n # scaled queries and keys by forth root \n X_q_scaled = X_q / (self.embed_size ** (1/4))\n X_k_scaled = X_k / (self.embed_size ** (1/4))\n \n attn_w = torch.bmm(X_q_scaled, X_k_scaled.transpose(1,2))\n \n attn_w_normalized = self.softmax(attn_w)\n \n # reweighted value vectors\n z = torch.bmm(attn_w_normalized, X_v)\n \n return z, attn_w_normalized\n \n\nclass MH_SelfAttention(nn.Module):\n \"\"\" multi head self-attention module\n \"\"\"\n def __init__(self, input_size, num_attn_heads):\n \n super().__init__()\n \n layers = [SH_SelfAttention(input_size) for i in range(num_attn_heads)]\n \n self.multihead_pipeline = nn.ModuleList(layers)\n embed_size = input_size\n self.Wz = nn.Linear(num_attn_heads*embed_size, embed_size)\n \n \n \n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, (batch, ddi similarity type vector, input_size)\n \"\"\"\n bsize, num_modal, inp_dim = X.shape\n attn_tensor = X.new_zeros((bsize, num_modal, num_modal))\n out = []\n for SH_layer in self.multihead_pipeline:\n z, attn_w_normalized = SH_layer(X)\n out.append(z)\n attn_tensor += attn_w_normalized\n # concat on the feature dimension\n out = torch.cat(out, -1) \n attn_tensor = attn_tensor/len(self.multihead_pipeline)\n \n # return a unified vector mapping of the different self-attention blocks\n return self.Wz(out), attn_tensor\n \n\nclass TransformerUnit(nn.Module):\n \n def __init__(self, input_size, num_attn_heads, mlp_embed_factor, nonlin_func, pdropout):\n \n super().__init__()\n \n embed_size = input_size\n self.multihead_attn = MH_SelfAttention(input_size, num_attn_heads)\n \n self.layernorm_1 = nn.LayerNorm(embed_size)\n \n self.MLP = nn.Sequential(\n nn.Linear(embed_size, embed_size*mlp_embed_factor),\n nonlin_func,\n nn.Linear(embed_size*mlp_embed_factor, embed_size)\n )\n \n self.layernorm_2 = nn.LayerNorm(embed_size)\n \n self.dropout = nn.Dropout(p=pdropout)\n \n \n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, (batch, ddi similarity type vector, input_size)\n \"\"\"\n # z is tensor of size (batch, ddi similarity type vector, input_size)\n z, attn_tensor = self.multihead_attn(X)\n # layer norm with residual connection\n z = self.layernorm_1(z + X)\n z = self.dropout(z)\n z_ff= self.MLP(z)\n z = self.layernorm_2(z_ff + z)\n z = self.dropout(z)\n \n return z, attn_tensor\n \nclass FeatureEmbAttention(nn.Module):\n def __init__(self, input_dim):\n '''\n Args:\n input_dim: int, size of the input vector (i.e. 
feature vector)\n '''\n\n super().__init__()\n self.input_dim = input_dim\n # use this as query vector against the transformer outputs\n self.queryv = nn.Parameter(torch.randn(input_dim, dtype=torch.float32), requires_grad=True)\n self.softmax = nn.Softmax(dim=1) # normalized across seqlen\n\n def forward(self, X):\n '''Performs forward computation\n Args:\n X: torch.Tensor, (batch, ddi similarity type vector, feat_dim), dtype=torch.float32\n '''\n\n X_scaled = X / (self.input_dim ** (1/4))\n queryv_scaled = self.queryv / (self.input_dim ** (1/4))\n # using matmul to compute tensor vector multiplication\n \n # (bsize, seqlen)\n attn_weights = X_scaled.matmul(queryv_scaled)\n\n # softmax\n attn_weights_norm = self.softmax(attn_weights)\n\n # reweighted value vectors (in this case reweighting the original input X)\n # unsqueeze attn_weights_norm to get (bsize, 1, num similarity type vectors)\n # perform batch multiplication with X that has shape (bsize, num similarity type vectors, feat_dim)\n # result will be (bsize, 1, feat_dim)\n # squeeze the result to obtain (bsize, feat_dim)\n z = attn_weights_norm.unsqueeze(1).bmm(X).squeeze(1)\n \n # returns (bsize, feat_dim), (bsize, num similarity type vectors)\n return z, attn_weights_norm\n\ndef _init_model_params(named_parameters):\n for p_name, p in named_parameters:\n param_dim = p.dim()\n if param_dim > 1: # weight matrices\n nn.init.xavier_uniform_(p)\n elif param_dim == 1: # bias parameters\n if p_name.endswith('bias'):\n nn.init.uniform_(p, a=-1.0, b=1.0)\n\nclass DDI_Transformer(nn.Module):\n\n def __init__(self, input_size=586, input_embed_dim=64, num_attn_heads=8, mlp_embed_factor=2, \n nonlin_func=nn.ReLU(), pdropout=0.3, num_transformer_units=12,\n pooling_mode = 'attn'):\n \n super().__init__()\n embed_size = input_size\n self.Wembed = nn.Linear(input_size, embed_size)\n \n trfunit_layers = [TransformerUnit(embed_size, num_attn_heads, mlp_embed_factor, nonlin_func, pdropout) for i in range(num_transformer_units)]\n self.trfunit_pipeline = nn.ModuleList(trfunit_layers)\n\n self.pooling_mode = pooling_mode\n if pooling_mode == 'attn':\n self.pooling = FeatureEmbAttention(embed_size)\n elif pooling_mode == 'mean':\n self.pooling = torch.mean\n\n self._init_params_()\n \n \n def _init_params_(self):\n _init_model_params(self.named_parameters())\n \n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, (batch, ddi similarity type vector, input_size)\n \"\"\"\n\n # mean pooling TODO: add global attention layer or other pooling strategy\n bsize, num_modal, inp_dim = X.shape\n attn_tensor = X.new_zeros((bsize, num_modal, num_modal))\n xinput = X\n for encunit in self.trfunit_pipeline:\n z, attn_h_tensor = encunit(xinput)\n xinput = z\n attn_tensor += attn_h_tensor\n attn_tensor = attn_tensor/len(self.trfunit_pipeline)\n \n # pool across similarity type vectors\n # Note: z.mean(dim=1) will change shape of z to become (batch, input_size)\n # we can keep dimension by running z.mean(dim=1, keepdim=True) to have (batch, 1, input_size)\n\n # pool across similarity type vectors\n if self.pooling_mode == 'attn':\n z, fattn_w_norm = self.pooling(z)\n # Note: z.mean(dim=1) or self.pooling(z, dim=1) will change shape of z to become (batch, embedding dim)\n # we can keep dimension by running z.mean(dim=1, keepdim=True) to have (batch, 1, embedding dim)\n elif self.pooling_mode == 'mean':\n z = self.pooling(z, dim=1)\n fattn_w_norm = None\n \n return z, fattn_w_norm, attn_tensor\n\nclass DDI_SiameseTrf(nn.Module):\n\n def __init__(self, input_dim, dist, 
num_classes=2):\n \n super().__init__()\n \n if dist == 'euclidean':\n self.dist = nn.PairwiseDistance(p=2, keepdim=True)\n self.alpha = 0\n elif dist == 'manhattan':\n self.dist = nn.PairwiseDistance(p=1, keepdim=True)\n self.alpha = 0\n elif dist == 'cosine':\n self.dist = nn.CosineSimilarity(dim=1)\n self.alpha = 1\n\n self.Wy = nn.Linear(2*input_dim+1, num_classes)\n # perform log softmax on the feature dimension\n self.log_softmax = nn.LogSoftmax(dim=-1)\n\n self._init_params_() \n \n def _init_params_(self):\n _init_model_params(self.named_parameters())\n \n def forward(self, Z_a, Z_b):\n \"\"\"\n Args:\n Z_a: tensor, (batch, embedding dim)\n Z_b: tensor, (batch, embedding dim)\n \"\"\"\n\n dist = self.dist(Z_a, Z_b).reshape(-1,1)\n # update dist to distance measure if cosine is chosen\n dist = self.alpha * (1-dist) + (1-self.alpha) * dist\n \n out = torch.cat([Z_a, Z_b, dist], axis=-1)\n y = self.Wy(out)\n return self.log_softmax(y), dist\n \nclass DDI_Transformer_Softmax(nn.Module):\n\n def __init__(self, input_size=586, input_embed_dim=64, num_attn_heads=8, mlp_embed_factor=2, \n nonlin_func=nn.ReLU(), pdropout=0.3, num_transformer_units=12,\n pooling_mode = 'attn', num_classes=2):\n \n super().__init__()\n \n embed_size = input_size #input_embed_dim\n\n self.Wembed = nn.Linear(input_size, embed_size)\n \n trfunit_layers = [TransformerUnit(embed_size, num_attn_heads, mlp_embed_factor, nonlin_func, pdropout) for i in range(num_transformer_units)]\n self.trfunit_pipeline = nn.Sequential(*trfunit_layers)\n\n self.Wy = nn.Linear(embed_size, num_classes)\n self.pooling_mode = pooling_mode\n if pooling_mode == 'attn':\n self.pooling = FeatureEmbAttention(embed_size)\n elif pooling_mode == 'mean':\n self.pooling = torch.mean\n\n # perform log softmax on the feature dimension\n self.log_softmax = nn.LogSoftmax(dim=-1)\n self._init_params_()\n \n \n def _init_params_(self):\n for p_name, p in self.named_parameters():\n param_dim = p.dim()\n if param_dim > 1: # weight matrices\n nn.init.xavier_uniform_(p)\n elif param_dim == 1: # bias parameters\n if p_name.endswith('bias'):\n nn.init.uniform_(p, a=-1.0, b=1.0)\n \n def forward(self, X):\n \"\"\"\n Args:\n X: tensor, (batch, ddi similarity type vector, input_size)\n \"\"\"\n\n X = self.Wembed(X) \n z = self.trfunit_pipeline(X)\n \n # mean pooling TODO: add global attention layer or other pooling strategy\n # pool across similarity type vectors\n # Note: z.mean(dim=1) will change shape of z to become (batch, input_size)\n # we can keep dimension by running z.mean(dim=1, keepdim=True) to have (batch, 1, input_size)\n\n # pool across similarity type vectors\n if self.pooling_mode == 'attn':\n z, fattn_w_norm = self.pooling(z)\n # Note: z.mean(dim=1) or self.pooling(z, dim=1) will change shape of z to become (batch, embedding dim)\n # we can keep dimension by running z.mean(dim=1, keepdim=True) to have (batch, 1, embedding dim)\n elif self.pooling_mode == 'mean':\n z = self.pooling(z, dim=1)\n fattn_w_norm = None\n\n y = self.Wy(z) \n \n return self.log_softmax(y) #,fattn_w_norm\n", "sub_path": "ddi/model_attn_siamese.py", "file_name": "model_attn_siamese.py", "file_ext": "py", "file_size_in_byte": 11576, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "torch.nn.Module", "line_number": 4, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 4, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 13, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.bmm", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 76, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 114, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torch.nn.Softmax", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 157, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 157, "usage_type": "name"}, {"api_name": "torch.nn.init.uniform_", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 160, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 162, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 162, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 165, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 170, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 170, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 173, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 179, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 218, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 218, "usage_type": "name"}, {"api_name": "torch.nn.PairwiseDistance", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 225, "usage_type": "name"}, {"api_name": "torch.nn.PairwiseDistance", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 228, "usage_type": "name"}, {"api_name": "torch.nn.CosineSimilarity", "line_number": 231, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 231, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 234, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 236, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 254, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 258, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 258, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 261, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 268, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 268, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 271, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 271, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 273, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 273, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 278, "usage_type": "attribute"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 281, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 289, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 289, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 289, "usage_type": "name"}, {"api_name": "torch.nn.init.uniform_", "line_number": 292, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 292, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 292, "usage_type": "name"}]} +{"seq_id": "572177804", "text": "\"\"\"\r\nName: Vicente James Perez\r\nDate: 2/21/2021\r\nAssignment: Module 5: Role Based Access Control\r\n Module 6: Encrypt Data in Database\r\n Module 7: Send Encrypted Message to Boss\r\nDue Date: 2/21/2021\r\nAbout this project: Implement RBAC onto Module 4 flask website\r\n Add encryption to certain fields\r\n Add TCPServer communication and new Messages.db\r\nAssumptions:NA\r\nAll work below was performed by Vicente James Perez\r\n\"\"\"\r\n\r\n# Note: some code below is from my Module 3 assignment\r\nimport sqlite3 as sql\r\nimport pandas as pd\r\nimport Encryption as Enc\r\nimport base64\r\n\r\n# DDL Portion:\r\nconnection1 = sql.connect('.\\SecretAgents.db')\r\nconnection2 = sql.connect('.\\Messages.db')\r\ncursor1 = connection1.cursor()\r\ncursor2 = connection2.cursor()\r\n\r\n# validation that table exists - if so, drop 
it\r\ncursor1.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='SecretAgents' ''')\r\nif cursor1.fetchone()[0] == 1:\r\n    connection1.execute('DROP TABLE SecretAgents')\r\n    connection1.commit()\r\ncursor2.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='Messages' ''')\r\nif cursor2.fetchone()[0] == 1:\r\n    connection2.execute('DROP TABLE Messages')\r\n    connection2.commit()\r\n\r\n# table creation/re-creation with the 5 required parameters\r\ncursor1.execute('''CREATE TABLE SecretAgents(AgentID INTEGER PRIMARY KEY NOT NULL, AgentName TEXT NOT NULL, \r\n                AgentAlias TEXT NOT NULL, AgentSecurityLevel INTEGER NOT NULL, LoginPassword TEXT NOT NULL)''')\r\nconnection1.commit()\r\n# create Message table with the 3 required parameters\r\ncursor2.execute('''CREATE TABLE Messages(MessageID INTEGER PRIMARY KEY NOT NULL, AgentID INTEGER NOT NULL, \r\n                Message TEXT NOT NULL)''')\r\nconnection2.commit()\r\n\r\n# sample_data taken from sample output in Assignment outline\r\nsample_data = [[1, 'Princess Diana', 'Lady Di', 1, 'test123'],\r\n               [2, 'Henry Thorgood', 'Goody 2 shoes', 3, 'test123'],\r\n               [3, 'Tina Fairchild', 'Happy', 1, 'test123'],\r\n               [4, 'Tom Smith', 'Sleepy', 1, 'test987'],\r\n               [5, 'Kim Lovegood', 'Snoozy', 2, 'test987'],\r\n               [6, 'Tim Harris', 'Doc', 3, 'test987']]\r\n\r\nsample_data2 = [[1, 1, 'test123'],\r\n                [2, 3, 'test123'],\r\n                [3, 1, 'test123'],\r\n                [4, 1, 'test987'],\r\n                [5, 2, 'test987'],\r\n                [6, 3, 'test987']]\r\n\r\n# encrypt required fields\r\nfor i in range(len(sample_data)):\r\n    sample_data[i][1] = Enc.cipher.encrypt(sample_data[i][1].encode('utf-8')).decode('utf-8')\r\n    sample_data[i][2] = Enc.cipher.encrypt(sample_data[i][2].encode('utf-8')).decode('utf-8')\r\n    sample_data[i][4] = Enc.cipher.encrypt(sample_data[i][4].encode('utf-8')).decode('utf-8')\r\n# inserting data above into table\r\ncursor1.executemany('INSERT INTO SecretAgents VALUES (?,?,?,?,?)', sample_data)\r\nconnection1.commit()\r\n\r\ncursor2.executemany('INSERT INTO Messages VALUES (?,?,?)', sample_data2)\r\nconnection2.commit()\r\n\r\n# using pandas dataframe to print out table\r\n\r\nagent_df = pd.read_sql_query('SELECT * FROM SecretAgents', connection1)\r\nprint('SecretAgents Table: ')\r\nprint(agent_df.to_string(index=False), '\\n')\r\n\r\nmessages_df = pd.read_sql_query('SELECT * FROM Messages', connection2)\r\nprint('Messages Table: ')\r\nprint(messages_df.to_string(index=False), '\\n')\r\n", "sub_path": "Flask_website_db/Setup.py", "file_name": "Setup.py", "file_ext": "py", "file_size_in_byte": 3407, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sqlite3.connect", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 23, "usage_type": "call"}, {"api_name": "Encryption.cipher.encrypt", "line_number": 63, "usage_type": "call"}, {"api_name": "Encryption.cipher", "line_number": 63, "usage_type": "attribute"}, {"api_name": "Encryption.cipher.encrypt", "line_number": 64, "usage_type": "call"}, {"api_name": "Encryption.cipher", "line_number": 64, "usage_type": "attribute"}, {"api_name": "Encryption.cipher.encrypt", "line_number": 65, "usage_type": "call"}, {"api_name": "Encryption.cipher", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pandas.read_sql_query", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "339771804", "text": "import argparse\nfrom 
nipype.interfaces.niftyreg import RegMeasure\n\nparser = argparse.ArgumentParser(description='NiftyReg measure nipype unit tests')\nparser.add_argument('-r','--ref', help='Reference image path', required=True)\nparser.add_argument('-f','--flo', help='Floating image path', required=True)\nparser.add_argument('-m','--mea', choices=['ncc', 'lncc', 'nmi', 'ssd'], \n default='ncc', help='Measure to compute', required=True);\n\nargs = parser.parse_args()\n\nmeasure = RegMeasure()\nmeasure.inputs.ref_file = args.ref\nmeasure.inputs.flo_file = args.flo\nmeasure.inputs.measure_type = args.mea\n\nresult = measure.run()\nprint(result.outputs.out_file)", "sub_path": "nipype/interfaces/niftyreg/tests/measure_nipype_test.py", "file_name": "measure_nipype_test.py", "file_ext": "py", "file_size_in_byte": 672, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 4, "usage_type": "call"}, {"api_name": "nipype.interfaces.niftyreg.RegMeasure", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "450258572", "text": "from __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import zip\nfrom builtins import object\nimport csv\nimport unittest\nimport urllib.request, urllib.error, urllib.parse\nimport json\nimport api.StockAdvisor.utils as myutils\n\nfrom watson_developer_cloud import SpeechToTextV1\nimport watson_developer_cloud.natural_language_understanding.features.v1 \\\n as Features\nfrom watson_developer_cloud.natural_language_understanding_v1 import NaturalLanguageUnderstandingV1\n\nfrom api.StockAdvisor.settings import natural_language_understanding_settings as nlu_settings\nfrom time import sleep\nfrom bs4 import BeautifulSoup\n\nDEBUG = True\n\n\nclass ProcessUrls(object):\n url_file_name = \"\"\n content = \"\"\n nl_understanding = None # will store the understanding\n open_files = {}\n csv_header = []\n json_arrays = {}\n\n def __init__(self, url_file_name):\n self.url_file_name = url_file_name\n\n pass\n\n def get_json_array_for_stock_symbol(self, stock_symbol):\n if stock_symbol not in list(self.json_arrays.keys()):\n self.json_arrays[stock_symbol] = []\n return self.json_arrays[stock_symbol]\n\n def write_data(self):\n for key in list(self.json_arrays.keys()):\n out_file = open(\"data/{}.json\".format(key), \"wb\")\n json.dump(self.json_arrays[key], out_file)\n out_file.close()\n\n def process(self):\n\n f = open(self.url_file_name, 'rb')\n reader = csv.reader(f)\n for (i, row) in enumerate(reader):\n\n if DEBUG and i > 5:\n break\n if i == 0:\n self.csv_header = row\n\n else:\n print(i, row)\n nl_understanding = \"\"\n cur_dictionary = dict(list(zip(self.csv_header, row)))\n date_key = \"time\"\n if date_key in list(cur_dictionary.keys()):\n cur_dictionary[date_key] = myutils.normalize_date_time(cur_dictionary.get(date_key)).strftime(\"%Y%m%d\")\n\n self.target_url = row[2]\n # content = \"\"\n # try:\n # html_content = urllib2.urlopen(self.target_url).read()\n # soup = BeautifulSoup(html_content, \"html.parser\")\n # content = soup.get_text()\n # except:\n # pass\n nl_understanding =myutils.get_nl_understanding_from_bluemix(row[2])\n\n if nl_understanding:\n cur_dictionary[\"sentiment\"] = nl_understanding.get(\"sentiment\", [])\n cur_dictionary[\"keywords\"] = nl_understanding.get(\"keywords\", [])\n\n cur_json_array = self.get_json_array_for_stock_symbol(myutils.clean_key(row[0]))\n 
cur_json_array.append(cur_dictionary)\n\n        return self.json_arrays\n        # self.write_data()\n\n\nclass TestProcessUrls(unittest.TestCase):\n    def setUp(self):\n        unittest.TestCase.setUp(self)\n\n    def tearDown(self):\n        unittest.TestCase.tearDown(self)\n\n    def testMet1(self):\n        pu = ProcessUrls(\"/home/marlabs/codebase/stock-advisor/data/stock_info.csv\")\n        pu.process()\n        pass\n\n\nif __name__ == '__main__':\n    unittest.main()\n", "sub_path": "api/StockAdvisor/bluemix/process_urls.py", "file_name": "process_urls.py", "file_ext": "py", "file_size_in_byte": 3271, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "future.standard_library.install_aliases", "line_number": 3, "usage_type": "call"}, {"api_name": "future.standard_library", "line_number": 3, "usage_type": "name"}, {"api_name": "builtins.object", "line_number": 24, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 45, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 51, "usage_type": "call"}, {"api_name": "builtins.zip", "line_number": 62, "usage_type": "call"}, {"api_name": "api.StockAdvisor.utils.normalize_date_time", "line_number": 65, "usage_type": "call"}, {"api_name": "api.StockAdvisor.utils", "line_number": 65, "usage_type": "name"}, {"api_name": "api.StockAdvisor.utils.get_nl_understanding_from_bluemix", "line_number": 75, "usage_type": "call"}, {"api_name": "api.StockAdvisor.utils", "line_number": 75, "usage_type": "name"}, {"api_name": "api.StockAdvisor.utils.clean_key", "line_number": 81, "usage_type": "call"}, {"api_name": "api.StockAdvisor.utils", "line_number": 81, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 88, "usage_type": "attribute"}, {"api_name": "unittest.TestCase.setUp", "line_number": 90, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 90, "usage_type": "attribute"}, {"api_name": "unittest.TestCase.tearDown", "line_number": 93, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 93, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "353517742", "text": "import sys\n\nimport requests\n\nfrom src.logger import get_logger\nfrom src.response import response_client_error, response_ok\n\n\ndef parse_qs(qs):\n    return qs.get('year'), qs.get('month'), qs.get('day'), qs.get('cityCode')\n\n\nclass WasteAvgAPI(object):\n\n    def __init__(self, query_string):\n        self.qs = query_string\n        self.logger = get_logger(__name__)\n        self.api_client = self.client_session\n\n    def fetch(self) -> requests.Response:\n        self.logger.info(f'WasteAvgAPI API called with {self.api_endpoint}')\n        self.logger.info(f'Parameters: {self.requested_params}')\n\n        response = self.api_client.get(self.api_endpoint, params=self.requested_params).json()\n        self.logger.info(f'### Cites Wastes: {response}')\n\n        totalWaste = 0\n        for cityObj in response:\n            self.logger.info(f'### cityObj: {cityObj}')\n            totalWaste += cityObj['wasteTotal']\n\n        return {\n            'avg': (totalWaste / len(response)) / 1000000 if len(response) > 0 else 0,  # guard against an empty response; report in millions like 'total'\n            'total': totalWaste / 1000000\n        }\n\n    @property\n    def client_session(self):\n        session = requests.Session()\n        session.headers.update({\n            'Content-Type': 'application/json;charset=utf8'\n        })\n        return session\n\n    @property\n    def api_endpoint(self):\n        cityCode = self.qs.get('cityCode')\n        if not cityCode:\n            return f'https://ovbvbxecc0.execute-api.ap-northeast-2.amazonaws.com/dev/ooops/waste/cities/'\n        else:\n            return 
f'https://ovbvbxecc0.execute-api.ap-northeast-2.amazonaws.com/dev/ooops/waste/{cityCode}/'\n\n @property\n def requested_params(self):\n year, month, day, cityCode = parse_qs(self.qs)\n\n return {\n \"year\": year,\n \"month\": month,\n \"day\": day,\n \"cityCode\": cityCode\n }\n\n\ndef main(event, context):\n if not event['queryStringParameters']:\n return response_client_error(None)\n\n api = WasteAvgAPI(event['queryStringParameters'])\n try:\n response = api.fetch()\n api.logger.info(f'Average successfully calculated: {response}')\n return response_ok(response)\n\n except Exception as e:\n _, _, trace_back = sys.exc_info()\n api.logger.error(e, exc_info=True)\n api.logger.error(trace_back)\n", "sub_path": "src/waste_avg.py", "file_name": "waste_avg.py", "file_ext": "py", "file_size_in_byte": 2325, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "src.logger.get_logger", "line_number": 17, "usage_type": "call"}, {"api_name": "requests.Response", "line_number": 20, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 39, "usage_type": "call"}, {"api_name": "src.response.response_client_error", "line_number": 67, "usage_type": "call"}, {"api_name": "src.response.response_ok", "line_number": 73, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "139714131", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2008-2014 Erik Svensson \n# Licensed under the MIT license.\n\nfrom setuptools import setup\n\nrequired = ['six>=1.1.0']\n\nsetup(\n name='transmissionrpc',\n version='0.12',\n description='Python module that implements the Transmission bittorent client RPC protocol.',\n author='Erik Svensson',\n author_email='erik.public@gmail.com',\n url='http://bitbucket.org/blueluna/transmissionrpc',\n keywords='transmission bittorent torrent',\n packages=['transmissionrpc'],\n install_requires = required,\n test_suite = \"test\",\n zip_safe=True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Communications :: File Sharing',\n 'Topic :: Internet'\n ],\n )\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 848, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "setuptools.setup", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "566991726", "text": "#!/usr/bin/env python\n\n# PROGRAM: sst_lat_l3u_l4.py\n# ----------------------------------------------------------------------------------\n# Version 0.4\n# 1 August, 2019\n# michael.taylor AT reading DOT ac DOT uk \n\nimport os\nimport os.path\nimport glob\nfrom optparse import OptionParser\nimport numpy as np\nimport xarray\nimport seaborn as sns; sns.set(style=\"darkgrid\")\nimport matplotlib\nimport matplotlib.pyplot as plt; plt.close(\"all\")\nfrom statsmodels import robust as rb\n\ndef plot_sst_lat(lat,sst_q3_lat,sst_q5_lat,plotfile):\n\n file_str = plotfile + \".png\"\n\n fig, ax = plt.subplots()\n plt.plot(sst_q3_lat, lat, '.', color='g', alpha=0.2, label='ql=3 (L3U)')\n plt.plot(sst_q5_lat, lat, '.', color='r', alpha=0.2, label='ql=4&5 (L3U)')\n ax = plt.gca()\n ax.set_ylim([-91,90])\n ticks = ax.get_yticks()\n ax.set_yticks(np.linspace(-90, 90, 7))\n plt.legend(loc=4, fontsize=8)\n plt.xlabel(r'mean SST / $K$')\n plt.ylabel(r'Latitude / $degrees$, N')\n plt.title(file_str, fontsize=8)\n 
fig.tight_layout()\n plt.savefig(file_str)\n plt.close('all')\n\n return\n\ndef plot_sst_lat_diff(lat,sst_q3_lat_diff,sst_q5_lat_diff,plotfile):\n\n gd_q3 = np.isfinite(sst_q3_lat_diff)\n gd_q5 = np.isfinite(sst_q5_lat_diff)\n q3_ave = np.mean(sst_q3_lat_diff[gd_q3])\n q3_med = np.median(sst_q3_lat_diff[gd_q3])\n q3_std = np.std(sst_q3_lat_diff[gd_q3])\n q3_rsd = rb.mad(sst_q3_lat_diff[gd_q3])\n q5_ave = np.mean(sst_q5_lat_diff[gd_q5])\n q5_med = np.median(sst_q5_lat_diff[gd_q5])\n q5_std = np.std(sst_q5_lat_diff[gd_q5])\n q5_rsd = rb.mad(sst_q5_lat_diff[gd_q5])\n\n q3str = 'ql=3:'+'Mean='+\"{0:.3f}\".format(q3_ave)+' Median='+\"{0:.3f}\".format(q3_med)+' SD='+\"{0:.3f}\".format(q3_std)+' RSD='+\"{0:.3f}\".format(q3_rsd)\n q5str = 'ql=4&5:'+'Mean='+\"{0:.3f}\".format(q5_ave)+' Median='+\"{0:.3f}\".format(q5_med)+' SD='+\"{0:.3f}\".format(q5_std)+' RSD='+\"{0:.3f}\".format(q5_rsd)\n file_str = plotfile + \"_diff\" + \".png\"\n\n fig, ax = plt.subplots()\n plt.plot(sst_q3_lat_diff, lat, '.', color='g', alpha=0.2, label=q3str)\n plt.plot(sst_q5_lat_diff, lat, '.', color='r', alpha=0.2, label=q5str)\n ax = plt.gca()\n ax.set_ylim([-91,90])\n ticks = ax.get_yticks()\n ax.set_yticks(np.linspace(-90, 90, 7))\n plt.legend(loc=4, fontsize=8)\n plt.xlabel(r'mean SST difference (L3U-L4 analysis) / $K$')\n plt.ylabel(r'Latitude / $degrees$, N')\n plt.title(file_str, fontsize=8)\n fig.tight_layout()\n plt.savefig(file_str)\n plt.close('all')\n\n return\n\nif __name__ == \"__main__\":\n\n #----------------------------------------------\n parser = OptionParser(\"usage: %prog instrument year month day\")\n (options, args) = parser.parse_args()\n try:\n instrument = args[0]\n year = args[1]\n month = args[2]\n day = args[3]\n except:\n instrument = 'AVHRRMTA_G'\n year = 2010\n month = 10\n day = 10\n\n FLAG_plot = 1\n\n# path_in = '/Users/michaeltaylor/Desktop/REPOS/AVHRR_SST/'\n# path_l4 = '/Users/michaeltaylor/Desktop/REPOS/AVHRR_SST/DATA/L4/' \n# path_in = '/gws/nopw/j04/fiduceo/Users/mtaylor/avhrr_sst/'\n# path_l4 = '/gws/nopw/j04/fiduceo/Users/mtaylor/avhrr_sst/DATA/L4/' \n\n path_in = os.getcwd()\n path_l4 = path_in + \"/\" + \"DATA/L4/\" + str(year) + \"/\" + str(month) + \"/\" + str(day)\n# path_l4 = \"/gws/nopw/j04/esacci_sst/output/CDR2.1_release/Analysis/L4/v2.1/\" + str(year) + \"/\" + str(month) + \"/\" + str(day) + \"/\"\n\n # LEVEL-4 FILES:\n # -------------\n # /gws/nopw/j04/esacci_sst/output/CDR2.1_release/Analysis/L4/v2.1/\n\n# file_l4 = '20040404120000-ESACCI-L4_GHRSST-SSTdepth-OSTIA-GLOB_CDR2.1-v02.0-fv01.0.nc' # NOAA-17, -16\n# file_l4 = '20101010120000-ESACCI-L4_GHRSST-SSTdepth-OSTIA-GLOB_CDR2.1-v02.0-fv01.0.nc'\n file_l4 = str(year)+str(month)+str(day)+'120000-ESACCI-L4_GHRSST-SSTdepth-OSTIA-GLOB_CDR2.1-v02.0-fv01.0.nc'\n\n print('file_l4=', file_l4)\n\n # LEVEL-3U FILES: \n # --------------\n # /gws/nopw/j04/fiduceo/Data/CDR/AVHRR_SST/v2.10.2/\n\n# path_l3u = \"/gws/nopw/j04/fiduceo/Data/CDR/AVHRR_SST/v2.10.2/\" + instrument + \"/\" + str(year) + \"/\" + str(month) + \"/\" + str(day) + \"/\"\n path_l3u = path_in + \"/\" \"DATA\" + \"/\" + instrument + \"/\" + str(year) + \"/\" + str(month) + \"/\" + str(day) + \"/\"\n\n print('path_l3u=', path_l3u)\n\n # HARMONISATION FILES:\n # -------------------\n # /gws/nopw/j04/fiduceo/Users/jmittaz/FCDR/Mike/FCDR_AVHRR/GBCS/dat_cci/\n\n file_in_l4 = os.path.join(path_l4,file_l4)\n dl4 = xarray.open_dataset(file_in_l4)\n sst_l4 = np.array(dl4['analysed_sst'][0,:,:])\n nlat = 3600\n\n if os.path.isdir(path_in):\n nclist = 
os.path.join(path_l3u,'*.nc')\n filelist = glob.glob(nclist)\n\n sst_q3_lat_total = np.zeros(nlat)\n sst_q5_lat_total = np.zeros(nlat)\n sst_q3_lat_nvals = np.zeros(nlat)\n sst_q5_lat_nvals = np.zeros(nlat)\n sst_q3_lat_mean = np.ones(nlat)*np.nan\n sst_q5_lat_mean = np.ones(nlat)*np.nan\n sst_q3_lat_mean_diff = np.ones(nlat)*np.nan\n sst_q5_lat_mean_diff = np.ones(nlat)*np.nan\n sst_l4_lat_mean = np.ones(nlat)*np.nan\n\n for ifile in range(len(filelist)):\n\n print('file_in=',str(ifile))\n file_in = str(filelist[ifile])\n ds = xarray.open_dataset(file_in)\n if instrument == 'AVHRRMTA_G':\n file_out = file_in[-80:-3] # filename without path # MTA has one extra char\n else:\n file_out = file_in[-79:-3] # filename without path\n lat = np.array(ds['lat'])\n lon = np.array(ds['lon'])\n sst = np.array(ds['sea_surface_temperature'][0,:,:])\n ql = np.array(ds['quality_level'][0,:,:])\n flags = np.array(ds['l2p_flags'][0,:,:])\n\n q3 = ql==3\n q5 = ql==5\n \n sst_q3_lat = []\n sst_q5_lat = []\n sst_q3_lat_l4 = []\n sst_q5_lat_l4 = []\n sst_q3_lat_sum = np.zeros(len(lat))\n sst_q5_lat_sum = np.zeros(len(lat))\n sst_q3_lat_n = np.zeros(len(lat))\n sst_q5_lat_n = np.zeros(len(lat))\n\n for i in range(sst.shape[0]):\n\n sst_q3_lat.append(np.mean(sst[i,np.where(q3[i,:])]))\n sst_q3_lat_l4.append(np.mean(sst_l4[i,np.where(q3[i,:])]))\n sst_q5_lat.append(np.mean(sst[i,np.where(q5[i,:])]))\n sst_q5_lat_l4.append(np.mean(sst_l4[i,np.where(q5[i,:])]))\n sst_q3_lat_sum[i] = np.sum(sst[i,np.where(q3[i,:])])\n sst_q5_lat_sum[i] = np.sum(sst[i,np.where(q5[i,:])])\n sst_q3_lat_n[i] = sst[i,np.where(q3[i,:])].shape[1]\n sst_q5_lat_n[i] = sst[i,np.where(q5[i,:])].shape[1]\n\n sst_q3_lat_diff = np.array(sst_q3_lat) - np.array(sst_q3_lat_l4)\n sst_q5_lat_diff = np.array(sst_q5_lat) - np.array(sst_q5_lat_l4)\n\n if FLAG_plot:\n\n plotfile = instrument + '_' + file_out[:13]\n plot_sst_lat(lat,sst_q3_lat,sst_q5_lat,plotfile)\n plot_sst_lat_diff(lat,sst_q3_lat_diff,sst_q5_lat_diff,plotfile)\n\n sst_q3_lat_total += sst_q3_lat_sum\n sst_q5_lat_total += sst_q5_lat_sum \n sst_q3_lat_nvals += sst_q3_lat_n\n sst_q5_lat_nvals += sst_q5_lat_n\n\n gd = np.isfinite(sst_q3_lat_nvals)\n sst_q3_lat_mean[gd] = sst_q3_lat_total[gd] / sst_q3_lat_nvals[gd]\n gd = np.isfinite(sst_q5_lat_nvals)\n sst_q5_lat_mean[gd] = sst_q5_lat_total[gd] / sst_q5_lat_nvals[gd] \n sst_l4_lat_mean = np.mean(sst_l4,axis=1) \n gd = np.isfinite(sst_q3_lat_mean)\n for k in range(nlat):\n nonan = np.where(sst_l4[k,:] > 0.0)\n sst_l4_lat_mean[k] = np.mean(sst_l4[k,nonan])\n sst_q3_lat_mean_diff[gd] = sst_q3_lat_mean[gd] - sst_l4_lat_mean[gd]\n gd = np.isfinite(sst_q5_lat_mean)\n sst_q5_lat_mean_diff[gd] = sst_q5_lat_mean[gd] - sst_l4_lat_mean[gd]\n\n if FLAG_plot:\n\n plotfile = instrument + '_' + file_out[:8]\n plot_sst_lat(lat,sst_q3_lat_mean,sst_q5_lat_mean,plotfile)\n plot_sst_lat_diff(lat,sst_q3_lat_mean_diff,sst_q5_lat_mean_diff,plotfile)\n\n print('** END')\n\n\n", "sub_path": "sst_lat_l3u_l4.py", "file_name": "sst_lat_l3u_l4.py", "file_ext": "py", "file_size_in_byte": 8234, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "seaborn.set", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.isfinite", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 47, "usage_type": "call"}, {"api_name": "statsmodels.robust.mad", "line_number": 48, "usage_type": "call"}, {"api_name": "statsmodels.robust", "line_number": 48, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 51, "usage_type": "call"}, {"api_name": "statsmodels.robust.mad", "line_number": 52, "usage_type": "call"}, {"api_name": "statsmodels.robust", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "optparse.OptionParser", "line_number": 78, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "xarray.open_dataset", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 140, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 141, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 142, "usage_type": "attribute"}, {"api_name": "xarray.open_dataset", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 176, 
"usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 206, "usage_type": "call"}]} +{"seq_id": "314512448", "text": "# Copyright 2021 IBM Corp. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUtilities for end2end testing.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nfrom subprocess import call, check_call\ntry:\n from subprocess import DEVNULL # Python 3\nexcept ImportError:\n DEVNULL = open(os.devnull, 'wb') # pylint: disable=consider-using-with\n\nimport pytest\n\n# Server nickname or server group nickname in WBEM server definition file\nTEST_SERVER_IMAGE = os.getenv('TEST_SERVER_IMAGE', None)\n\n\n@pytest.fixture(\n params=[TEST_SERVER_IMAGE],\n scope='module'\n)\ndef server_url(request):\n \"\"\"\n Fixture that starts a WBEM server in a Docker image and returns its URL.\n\n The TCP ports used on the host side are 15988 and 15989 so that they do\n not conflict with a WBEM server the user may have set up manually, which\n typically would use the standard ports 5988 and 5989.\n \"\"\"\n image = request.param\n if image is None:\n raise ValueError(\"TEST_SERVER_IMAGE variable not specified\")\n\n # The container name and ports are chosen to minimize the potential of\n # conflicts. 
They are fixed so multiple instances of the test cannot run\n # in parallel on the same system.\n container = 'pywbemtools_test_server'\n host_port_http = '15988'\n host_port_https = '15989'\n\n call(['docker', 'rm', container, '--force'],\n stdout=DEVNULL, stderr=DEVNULL)\n\n check_call(['docker', 'create',\n '--name', container,\n '--publish', '{}:5988'.format(host_port_http),\n '--publish', '{}:5989'.format(host_port_https),\n image],\n stdout=DEVNULL)\n\n check_call(['docker', 'start', container], stdout=DEVNULL)\n\n yield 'https://localhost:15989'\n\n check_call(['docker', 'rm', container, '--force'], stdout=DEVNULL)\n", "sub_path": "tests/end2endtest/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2366, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "subprocess.DEVNULL", "line_number": 26, "usage_type": "name"}, {"api_name": "os.devnull", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 31, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 57, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 58, "usage_type": "name"}, {"api_name": "subprocess.check_call", "line_number": 60, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 65, "usage_type": "name"}, {"api_name": "subprocess.check_call", "line_number": 67, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 67, "usage_type": "name"}, {"api_name": "subprocess.check_call", "line_number": 71, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 71, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "621410336", "text": "from setuptools import setup, find_packages\nimport os\nhere = os.path.abspath(os.path.dirname(__file__))\n\nREADME = \"Table-DUMP\"\n\nrequires = [\n 'pyramid==1.10.4',\n 'redis==3.4.1',\n 'google-cloud-bigquery',\n 'requests==2.23.0',\n 'rq==1.4.2',\n 'google-cloud-storage',\n 'googleapis_common_protos'\n]\n\nsetup(name='table_dump_application',\n version=0.1,\n description='table_dump_application',\n long_description=README,\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Pylons\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\"\n ],\n keywords=\"web services\",\n author='telus',\n author_email='',\n url='',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=requires,\n paster_plugins=['pyramid'],\n entry_points=\"\"\"\\\n [paste.app_factory]\n main = src:main\n [console_scripts]\n table_dump_start = src.get_big_query\n \"\"\",\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1009, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.abspath", "line_number": 3, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 3, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 17, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "580782454", "text": "import bs4 as bs\nimport pickle\nimport requests\nimport os\nimport datetime as dt\nimport pandas_datareader.data as web\nfrom matplotlib import style\nimport 
matplotlib.dates as mdates\r\nimport calendar\r\nimport pandas as pd\r\nimport numpy as np\r\ndef get_tickers():\r\n    page=requests.get('http://thestockmarketwatch.com/markets/pre-market/today.aspx')\r\n    soup=bs.BeautifulSoup(page.text, 'html.parser')\r\n    table= soup.find('table', id='tblMovers')\r\n    stocks=table.find_all('td',{'class':'tdSymbol'})\r\n\r\n    tickers=[item.find(class_='symbol').get_text() for item in stocks]\r\n    print(tickers)\r\n\r\n    links=[]\r\n    for item in stocks:\r\n        #find all with a tag, then get href property of a\r\n        link=item.find('a')\r\n        links.append('thestockmarketwatch.com'+link.get('href'))\r\n    #return(tickers)\r\n    premarket_vol=[]\r\n    premarket_last=[]\r\n    premarket_high=[]\r\n    premarket_low=[]\r\n    last_news=[]\r\n    last_news_date=[]\r\n    for ticker in tickers:\r\n        link=requests.get('https://www.nasdaq.com/symbol/'+ ticker +'/premarket-chart')\r\n        link=bs.BeautifulSoup(link.text, 'html.parser')\r\n        premarket_vol.append(link.find(id='quotes_content_left_lblVolume').get_text())\r\n        premarket_last.append(link.find(id='quotes_content_left_lblLastsale').get_text())\r\n        premarket_high.append(link.find(id='quotes_content_left_lblHighprice').get_text())\r\n        premarket_low.append(link.find(id='quotes_content_left_lblLowprice').get_text())\r\n\r\n        link2=requests.get('https://www.nasdaq.com/symbol/'+ticker)\r\n        link2=bs.BeautifulSoup(link2.text,'html.parser')\r\n        news=link2.find(id='CompanyNewsCommentary')\r\n        last_news.append(news.find('a').get('href'))\r\n        last_news_date.append(news.find('small').get_text())\r\n    df2=pd.DataFrame({'PreMarket Volume':premarket_vol, 'PreMarket Low':premarket_low, 'PreMarket High':premarket_high, 'PreMarket Last':premarket_last, 'Symbol':tickers})\r\n    #can use dictionary but become alphabetical order\r\n    pd.options.display.max_colwidth = 200\r\n    pd.options.display.max_columns=200\r\n    pd.options.display.expand_frame_repr=True\r\n\r\n    # pd.set_option('expand_frame_repr', True)\r\n    # pd.set_option('max_columns',200)\r\n    #change max width so can display full text\r\n\r\n    df=pd.DataFrame(np.column_stack([tickers, premarket_vol, premarket_last, premarket_low, premarket_high, last_news, last_news_date]), columns=['Symbol', 'PreMarket Vol', 'PreMarket Last', 'Premarket Low', 'PreMarket High', 'Latest News', 'News Date and Source'])\r\n    print(df)\r\n\r\nget_tickers()\r\n\r\ndef analyze(tickers):  # ticker list must be passed in explicitly; it is not a global\r\n    style.use('ggplot')\r\n    today=dt.date.today()\r\n    day=calendar.day_name[today.weekday()]\r\n\r\n    #get weekday before\r\n    if day==\"Sunday\":\r\n        yesterday=today - dt.timedelta(days=2)\r\n    elif day==\"Monday\":\r\n        yesterday=today - dt.timedelta(days=3)\r\n    else:\r\n        yesterday=today - dt.timedelta(days=1)\r\n    #use timedelta for difference\r\n    start=yesterday\r\n    end=dt.datetime.now()\r\n    df=web.DataReader(tickers[0], 'morningstar', start, end)\r\n    print(df)\r\n\r\n\r\n", "sub_path": "ml_finance/get_data.py", "file_name": "get_data.py", "file_ext": "py", "file_size_in_byte": 3007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.options", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pandas.options", 
"line_number": 49, "usage_type": "attribute"}, {"api_name": "pandas.options", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.style.use", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 62, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 63, "usage_type": "attribute"}, {"api_name": "calendar.day_name", "line_number": 64, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pandas_datareader.data.DataReader", "line_number": 76, "usage_type": "call"}, {"api_name": "pandas_datareader.data", "line_number": 76, "usage_type": "name"}]} +{"seq_id": "21937308", "text": "import BaseHTTPServer\nimport SimpleHTTPServer\nimport SocketServer\nimport cgi\nimport json\nimport logging\nimport os\nimport posixpath\nimport re\nimport traceback\nimport urllib\nimport urlparse\n\nimport Constants\n\n\nclass HTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):\n\n def __init__(self, RequestHandlerClass,controller):\n self.basedir =os.path.join(os.path.dirname(os.path.relpath(__file__)),\"gui\")\n self.overwrite_map = ({\n '.png': 'image/png',\n '.js': 'text/javascript; charset=utf-8'\n })\n self.controller=controller\n BaseHTTPServer.HTTPServer.__init__(self, (\"0.0.0.0\",8080), RequestHandlerClass, True)\n self.serve_forever()\n\n def getUrlPath(self, path):\n '''\n get an url path that can be used to obtain a file given\n\n :param path:\n :return: None if no mapping is possible\n '''\n fp = os.path.realpath(path)\n return fp\n\n\n\nclass HTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):\n CONTROLURL=\"/control\"\n def __init__(self, request, client_address, server):\n self.logger=logging.getLogger(Constants.LOGNAME)\n # allow write buffering\n # see https://lautaportti.wordpress.com/2011/04/01/basehttprequesthandler-wastes-tcp-packets/\n self.wbufsize = -1\n self.id = None\n self.logger.debug(\"receiver thread started\", client_address)\n SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, request, client_address, server)\n\n def do_POST(self):\n maxlen = 5000000\n (path, sep, query) = self.path.partition('?')\n if not path.startswith(self.CONTROLURL):\n self.send_error(404, \"unsupported post url\")\n return\n try:\n ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))\n if ctype == 'multipart/form-data':\n postvars = cgi.parse_multipart(self.rfile, pdict)\n elif ctype == 'application/x-www-form-urlencoded':\n length = int(self.headers.getheader('content-length'))\n if length > maxlen:\n raise Exception(\"too much data\" + unicode(length))\n postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)\n elif ctype == 'application/json':\n length = int(self.headers.getheader('content-length'))\n if length > maxlen:\n raise Exception(\"too much data\" + unicode(length))\n postvars = {'_json': self.rfile.read(length)}\n else:\n postvars = {}\n requestParam = 
urlparse.parse_qs(query, True)\n requestParam.update(postvars)\n self.handleControlRequest(path, requestParam)\n except Exception as e:\n txt = traceback.format_exc()\n self.logger.debug(\"unable to process request for \", path, query, txt)\n self.send_response(500, txt)\n self.end_headers()\n return\n\n # overwrite this from SimpleHTTPRequestHandler\n def send_head(self):\n path = self.translate_path(self.path)\n if path is None:\n return\n \"\"\"Common code for GET and HEAD commands.\n\n This sends the response code and MIME headers.\n\n Return value is either a file object (which has to be copied\n to the outputfile by the caller unless the command was HEAD,\n and must be closed by the caller under all circumstances), or\n None, in which case the caller has nothing further to do.\n\n \"\"\"\n\n f = None\n if os.path.isdir(path):\n if not self.path.endswith('/'):\n # redirect browser - doing basically what apache does\n self.send_response(301)\n self.send_header(\"Location\", self.path + \"/\")\n self.end_headers()\n return None\n for index in \"index.html\", \"index.htm\":\n index = os.path.join(path, index)\n if os.path.exists(index):\n path = index\n break\n else:\n return self.list_directory(path)\n base, ext = posixpath.splitext(path)\n if ext in self.server.overwrite_map:\n ctype = self.server.overwrite_map[ext]\n else:\n ctype = self.guess_type(path)\n try:\n # Always read in binary mode. Opening files in text mode may cause\n # newline translations, making the actual size of the content\n # transmitted *less* than the content-length!\n f = open(path, 'rb')\n except IOError:\n self.send_error(404, \"File not found\")\n return None\n self.send_response(200)\n self.send_header(\"Content-type\", ctype)\n fs = os.fstat(f.fileno())\n self.send_header(\"Content-Length\", str(fs[6]))\n if path.endswith(\".js\") or path.endswith(\".less\"):\n self.send_header(\"cache-control\", \"private, max-age=0, no-cache\")\n self.send_header(\"Last-Modified\", self.date_time_string(fs.st_mtime))\n self.end_headers()\n return f\n def isForwarded(self):\n fh=self.headers.getheaders(\"x-forwarded-for\")\n if fh is None or len(fh) == 0:\n return False\n return True\n # overwrite this from SimpleHTTPRequestHandler\n def translate_path(self, path):\n \"\"\"Translate a /-separated PATH to the local filename syntax.\n\n Components that mean special things to the local file system\n (e.g. drive or directory names) are ignored. 
(XXX They should\n probably be diagnosed.)\n\n \"\"\"\n # abandon query parameters\n (path, sep, query) = path.partition('?')\n path = path.split('#', 1)[0]\n path = posixpath.normpath(urllib.unquote(path).decode('utf-8'))\n if path.startswith(self.CONTROLURL):\n requestParam = urlparse.parse_qs(query, True)\n self.handleControlRequest(path, requestParam)\n return None\n if path == \"\" or path == \"/\":\n path = \"/index.html\"\n self.send_response(301)\n self.send_header(\"Location\", path)\n self.end_headers()\n return None\n words = path.split('/')\n words = filter(None, words)\n path = \"\"\n for word in words:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if word in (\".\", \"..\"): continue\n path = os.path.join(path, word)\n self.logger.debug(\"request path/query\", path, query)\n # pathmappings expect to have absolute pathes!\n rtPath=os.path.join(self.server.basedir, path)\n #special handling for forwarded requests\n if self.isForwarded():\n translates=[\"^[/]*index.html$\",\"^[/]*manifest.json$\",\"^[/]*favicon.ico$\",\"^[/]*icon.png$\"]\n for tr in translates:\n trname=re.sub(\"(\\\\.[^.]*)$\",\"Ext\\\\1\",path)\n if re.match(tr,path) is not None:\n tryPath=os.path.join(self.server.basedir,trname)\n if os.path.exists(tryPath):\n return tryPath\n else:\n return rtPath\n return rtPath\n\n\n # send a json encoded response\n def sendJsonResponse(self, rtj, requestParam):\n if not rtj is None:\n self.send_response(200)\n if not requestParam.get('callback') is None:\n rtj = \"%s(%s);\" % (requestParam.get('callback'), rtj)\n self.send_header(\"Content-type\", \"text/javascript\")\n else:\n self.send_header(\"Content-type\", \"application/json\")\n self.send_header(\"Content-Length\", str(len(rtj)))\n self.send_header(\"Last-Modified\", self.date_time_string())\n self.end_headers()\n self.wfile.write(rtj)\n self.logger.debug(\"nav response\", rtj)\n else:\n raise Exception(\"empty response\")\n\n\n def handleControlRequest(self, path, requestParam):\n '''\n control requests\n :param path:\n :param requestParam:\n :return:\n '''\n try:\n rtj=self.server.controller.handleRequest(requestParam)\n self.sendJsonResponse(json.dumps(rtj), requestParam)\n except Exception as e:\n text = e.message + \"\\n\" + traceback.format_exc()\n self.logger.debug(\"unable to process request for controlrequest \", text)\n self.sendJsonResponse(json.dumps({\n \"status\":\"ERROR\",\n \"info\":e.message,\n \"detail\":text\n }),requestParam)\n return\n\n", "sub_path": "server/httpserver.py", "file_name": "httpserver.py", "file_ext": "py", "file_size_in_byte": 7709, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "SocketServer.ThreadingMixIn", "line_number": 17, "usage_type": "attribute"}, {"api_name": "BaseHTTPServer.HTTPServer", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 20, "usage_type": "call"}, {"api_name": "BaseHTTPServer.HTTPServer.__init__", "line_number": 26, "usage_type": "call"}, {"api_name": "BaseHTTPServer.HTTPServer", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": 
"SimpleHTTPServer.SimpleHTTPRequestHandler", "line_number": 41, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 44, "usage_type": "call"}, {"api_name": "Constants.LOGNAME", "line_number": 44, "usage_type": "attribute"}, {"api_name": "SimpleHTTPServer.SimpleHTTPRequestHandler.__init__", "line_number": 50, "usage_type": "call"}, {"api_name": "SimpleHTTPServer.SimpleHTTPRequestHandler", "line_number": 50, "usage_type": "attribute"}, {"api_name": "cgi.parse_header", "line_number": 59, "usage_type": "call"}, {"api_name": "cgi.parse_multipart", "line_number": 61, "usage_type": "call"}, {"api_name": "cgi.parse_qs", "line_number": 66, "usage_type": "call"}, {"api_name": "urlparse.parse_qs", "line_number": 74, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "posixpath.splitext", "line_number": 115, "usage_type": "call"}, {"api_name": "os.fstat", "line_number": 130, "usage_type": "call"}, {"api_name": "posixpath.normpath", "line_number": 154, "usage_type": "call"}, {"api_name": "urllib.unquote", "line_number": 154, "usage_type": "call"}, {"api_name": "urlparse.parse_qs", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.splitdrive", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 180, "usage_type": "call"}, {"api_name": "re.match", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 217, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 219, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 221, "usage_type": "call"}]} +{"seq_id": "555714026", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\n\n\nclass GoodLifeFitnessSpider(scrapy.Spider):\n name = \"goodlifefitness\"\n item_attributes = {'brand': \"GoodLife Fitness\"}\n allowed_domains = [\"www.goodlifefitness.com\"]\n start_urls = (\n 'https://www.goodlifefitness.com/locations',\n )\n\n def start_requests(self):\n template = 'https://www.goodlifefitness.com/api/club/filterclubsforcitypage'\n\n headers = {\n 'Accept': 'application/json',\n }\n\n yield scrapy.http.FormRequest(\n url=template,\n method='GET',\n headers=headers,\n callback=self.parse\n )\n\n def parse(self, 
response):\n store_data = json.loads(response.body_as_unicode())\n for store in store_data:\n properties = {\n 'ref': store[\"ClubNo\"],\n 'name': store[\"ClubName\"],\n 'addr_full': store[\"Address1\"],\n 'city': store[\"City\"],\n 'state': store[\"Province\"],\n 'postcode': store[\"PostalCode\"],\n 'phone': store[\"Phone\"],\n 'lat': float(store[\"Lat\"]),\n 'lon': float(store[\"Long\"])\n }\n\n yield GeojsonPointItem(**properties)", "sub_path": "locations/spiders/goodlifefitness.py", "file_name": "goodlifefitness.py", "file_ext": "py", "file_size_in_byte": 1310, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "scrapy.Spider", "line_number": 8, "usage_type": "attribute"}, {"api_name": "scrapy.http.FormRequest", "line_number": 23, "usage_type": "call"}, {"api_name": "scrapy.http", "line_number": 23, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "locations.items.GeojsonPointItem", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "94678980", "text": "from forwardscoverbot import utils\nfrom forwardscoverbot import dbwrapper\nfrom forwardscoverbot import keyboards\nfrom forwardscoverbot import messages\n\nfrom telegram import MessageEntity\nfrom telegram import ParseMode\nfrom telegram import constants as t_consts\nfrom telegram import InlineKeyboardButton\nfrom telegram import InlineKeyboardMarkup\n\nfrom telegram.ext.dispatcher import run_async\n\nimport html\n\n\ndef help_command(update, context):\n keyboard = keyboards.github_link_kb()\n text = (\n \"Do you want to send a message to someone or in a group, but you want to avoid \"\n \"that someone could spread it on telegram with your name? This bot just echos \"\n \"your messages.\\n\\nSend here what you want and you will get the same message \"\n \"back, then forward the message where you want and the forward label will have \"\n \"the name of this bot.\\nIt works also if you edit messages or forward messages. \"\n \"It also keeps the same text formatting style.\\n\\n\"\n \"Supported commands:\\n\"\n \"/disablewebpagepreview\\n\"\n \"/removecaption\\n\"\n \"/addcaption\\n\"\n \"/removebuttons\\n\"\n \"/addbuttons\"\n )\n update.message.reply_text(text=text, parse_mode=ParseMode.HTML, reply_markup=keyboard)\n\n\n\ndef disable_web_page_preview(update, context):\n if not update.message.reply_to_message:\n text = (\"This command permits to remove the web page preview from a message with \"\n \"a link.\\n\\nUse it replying to the message the bot already echoed and you \"\n \"want to disable the preview with this command.\")\n update.message.reply_text(text=text)\n return\n\n if not update.message.reply_to_message.text:\n text = \"This message does not have a web page preview\"\n update.message.reply_to_message.reply_text(text=text, quote=True)\n return\n\n entities_list = [MessageEntity.URL, MessageEntity.TEXT_LINK]\n entities = update.message.reply_to_message.parse_entities(entities_list)\n if len(entities) == 0:\n text = \"This message does not have a web page preview\"\n update.message.reply_to_message.reply_text(text=text, quote=True)\n return\n\n messages.process_message(update=update, context=context, message=update.message.reply_to_message, disable_web_page_preview=True)\n\n\n\ndef remove_caption(update, context):\n if not update.message.reply_to_message:\n text = (\n \"This command permits to remove caption from a message. 
Reply with this command to \"\n \"the message where you want to remove the caption. Be sure the message has a caption.\"\n )\n update.message.reply_text(text=text)\n return\n\n if not update.message.reply_to_message.caption:\n text = \"This message has no caption, so what should I remove? Use this command on messages that have a caption.\"\n context.bot.sendMessage(\n chat_id=update.message.from_user.id,\n text=text,\n reply_to_message_id=update.message.reply_to_message.message_id,\n quote=True\n )\n return\n\n messages.process_message(update=update, context=context, message=update.message.reply_to_message, remove_caption=True)\n\n\n\ndef remove_buttons(update, context):\n if not update.message.reply_to_message:\n text = (\n \"This command permits to remove buttons from a message. Reply with this command to \"\n \"the message where you want to remove the buttons. Be sure the message has buttons.\"\n )\n update.message.reply_text(text=text)\n return\n\n if not update.message.reply_to_message.reply_markup:\n text = \"This message has no buttons, so what should I remove? Use this command on messages that have buttons.\"\n context.bot.sendMessage(\n chat_id=update.message.from_user.id,\n text=text,\n reply_to_message_id=update.message.reply_to_message.message_id,\n quote=True\n )\n return\n\n messages.process_message(update=update, context=context, message=update.message.reply_to_message, remove_buttons=True) \n\n\n\ndef add_caption(update, context):\n if not update.message.reply_to_message:\n text = (\n \"This command permits to add a caption to a message. Reply with this command and the caption after it to \"\n \"the message where you want to add the caption.\\n\\nIf the message already has a caption \"\n \"this command will overwrite the current caption with the new one.\\n\"\n \"If the message doesn't support a caption, it simply won't be added; no errors are returned.\\n\\n\\n\"\n \"Note: if the message is sent by you, you can just edit it to add the caption. This command is intended \"\n \"for cases where, for example, you are forwarding from a channel a big file you don't want to download and \"\n \"upload again.\"\n )\n update.message.reply_text(text=text, parse_mode='HTML')\n return\n\n caption = \" \".join(update.message.text.split(\" \")[1:])\n caption_html = \" \".join(update.message.text_html.split(\" \")[1:])\n\n if len(caption) > t_consts.MAX_CAPTION_LENGTH:\n text = \"This caption is too long. Max allowed: {} chars. Please retry removing {} chars.\".format(\n t_consts.MAX_CAPTION_LENGTH,\n len(caption) - t_consts.MAX_CAPTION_LENGTH\n )\n context.bot.sendMessage(\n chat_id=update.message.from_user.id,\n text=text,\n reply_to_message_id=update.message.reply_to_message.message_id,\n quote=True\n )\n return\n\n messages.process_message(update=update, context=context, message=update.message.reply_to_message, custom_caption=caption_html)\n\n\n\ndef add_buttons(update, context):\n usage = (\n \"Using this command you can add buttons to messages.\\nReply with this command to the message where you want to add the buttons. Example:\\n\\n\"\n \"/addbuttons first link=https://telegram.org && second link same row=https://google.it &&& third link new row=https://t.me\"\n \"\\n\\nSo the format for a button is [text]=[link]. 
Buttons on the same line are separated by && and on new lines are separeted by &&&.\"\n )\n if not update.message.reply_to_message or len(context.args) < 1:\n update.message.reply_text(text=usage, parse_mode='HTML')\n return\n \n param = ' '.join(context.args)\n rows = param.split('&&&')\n lst = []\n for row in rows:\n try:\n row_lst = []\n row_buttons = row.split('&&')\n for button in row_buttons:\n text, link = button.split('=')\n text = text.strip()\n link = link.strip()\n button = InlineKeyboardButton(text=text, url=link)\n \n row_lst.append(button)\n lst.append(row_lst)\n except Exception as e:\n error = 'ERROR formatting the buttons'\n update.message.reply_text(text=error, parse_mode='HTML')\n keyboard = InlineKeyboardMarkup(lst)\n messages.process_message(update=update, context=context, message=update.message.reply_to_message, custom_reply_markup=keyboard)\n \n\n@utils.only_admin\ndef stats(update, context):\n update.message.reply_text(text=dbwrapper.stats_text(), parse_mode=ParseMode.HTML)\n\n\n\n", "sub_path": "forwardscoverbot/commands.py", "file_name": "commands.py", "file_ext": "py", "file_size_in_byte": 7378, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "forwardscoverbot.keyboards.github_link_kb", "line_number": 18, "usage_type": "call"}, {"api_name": "forwardscoverbot.keyboards", "line_number": 18, "usage_type": "name"}, {"api_name": "telegram.ParseMode.HTML", "line_number": 33, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 33, "usage_type": "name"}, {"api_name": "telegram.MessageEntity.URL", "line_number": 50, "usage_type": "attribute"}, {"api_name": "telegram.MessageEntity", "line_number": 50, "usage_type": "name"}, {"api_name": "telegram.MessageEntity.TEXT_LINK", "line_number": 50, "usage_type": "attribute"}, {"api_name": "forwardscoverbot.messages.process_message", "line_number": 57, "usage_type": "call"}, {"api_name": "forwardscoverbot.messages", "line_number": 57, "usage_type": "name"}, {"api_name": "forwardscoverbot.messages.process_message", "line_number": 80, "usage_type": "call"}, {"api_name": "forwardscoverbot.messages", "line_number": 80, "usage_type": "name"}, {"api_name": "forwardscoverbot.messages.process_message", "line_number": 103, "usage_type": "call"}, {"api_name": "forwardscoverbot.messages", "line_number": 103, "usage_type": "name"}, {"api_name": "telegram.constants.MAX_CAPTION_LENGTH", "line_number": 124, "usage_type": "attribute"}, {"api_name": "telegram.constants", "line_number": 124, "usage_type": "name"}, {"api_name": "telegram.constants.MAX_CAPTION_LENGTH", "line_number": 126, "usage_type": "attribute"}, {"api_name": "telegram.constants", "line_number": 126, "usage_type": "name"}, {"api_name": "telegram.constants.MAX_CAPTION_LENGTH", "line_number": 127, "usage_type": "attribute"}, {"api_name": "telegram.constants", "line_number": 127, "usage_type": "name"}, {"api_name": "forwardscoverbot.messages.process_message", "line_number": 137, "usage_type": "call"}, {"api_name": "forwardscoverbot.messages", "line_number": 137, "usage_type": "name"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 162, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 169, "usage_type": "call"}, {"api_name": "forwardscoverbot.messages.process_message", "line_number": 170, "usage_type": "call"}, {"api_name": "forwardscoverbot.messages", "line_number": 170, "usage_type": "name"}, {"api_name": 
"forwardscoverbot.dbwrapper.stats_text", "line_number": 175, "usage_type": "call"}, {"api_name": "forwardscoverbot.dbwrapper", "line_number": 175, "usage_type": "name"}, {"api_name": "telegram.ParseMode.HTML", "line_number": 175, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 175, "usage_type": "name"}, {"api_name": "forwardscoverbot.utils.only_admin", "line_number": 173, "usage_type": "attribute"}, {"api_name": "forwardscoverbot.utils", "line_number": 173, "usage_type": "name"}]} +{"seq_id": "479295861", "text": "from django.shortcuts import render\nfrom django.views.generic.base import View\nfrom lulu import settings\nfrom django.http import HttpResponse\nimport os\n# Create your views here.\nclass UploadPhoto(View):\n def get(self,request):\n return render(request,'photography/uploadphoto.html')\n\n def post(self,request):\n #获取文件对象\n try:\n\n a_file= request.FILES['myfile']\n\n filename = os.path.join(settings.MEDIA_ROOT, a_file.name)\n print(\"文件路径\",filename)\n with open(filename, \"wb\") as fin:\n data = a_file.file.read()\n fin.write(data)\n return HttpResponse(\"上传成功\")\n\n\n except Exception as error:\n\n return HttpResponse(\"%s上传失败\"%error)\n\n", "sub_path": "lulu/photography/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.views.generic.base.View", "line_number": 7, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "lulu.settings.MEDIA_ROOT", "line_number": 17, "usage_type": "attribute"}, {"api_name": "lulu.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 22, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "293169823", "text": "\nimport sys\nimport json\nfrom benchmarker import Benchmarker\nimport time\nfrom datetime import datetime\nimport traceback\nfrom mysql_interface import SQL_Interface\nimport function_lib as lib\nfrom pprint import pprint\n\n# =====================================================================================\n# Read cli arguments from calling script\n\n# name of the terraform experiment\nexperiment_name = sys.argv[1]\n\n# unique identifier string tying this experiment together with the\n# experiments conducted for the other cloud providers in this round\nexperiment_meta_identifier = sys.argv[2]\n\n# name of cloud function provider for this experiment\nprovider = sys.argv[3]\n\n# name of the client provider\nclient_provider = sys.argv[4]\n\n# relative path to experiment.env file\nenv_file_path = sys.argv[5]\n\n# dev_mode\ndev_mode = eval(sys.argv[6]) if len(sys.argv) > 6 else False\n\n# verbose mode\nverbose = eval(sys.argv[7]) if len(sys.argv) > 7 else False\n\n# =====================================================================================\n\n# describe experiment, should be verbose enough to figure\n# out what the experiment does and what it attempts to test\ndescription = f\"\"\"\n{experiment_name}: This experiment tests the time it takes for\na single function instance to no longer be available due to inactivity in a lab-like\nsetting. 
The experiment is conducted by first invoking a single function 11 times via another \nfunction, the first time to make sure that the function instance is created, then the following\n10 times to create a baseline for a hot invocation so that it can be determined that the\nfirst was a cold start.\nThen the function is invoked continually with increasing delay between invocations,\nuntil the instance identifier changes, and the time elapsed since the last function call is logged.\nThis process is then repeated to verify the result.\n\"\"\"\n# =====================================================================================\n# create the benchmarker\nbenchmarker = Benchmarker(experiment_name=experiment_name,\n experiment_meta_identifier=experiment_meta_identifier,\n provider=provider,\n client_provider=client_provider,\n experiment_description=description,\n env_file_path=env_file_path,\n dev_mode=dev_mode,\n verbose=verbose)\n# =====================================================================================\n# database interface for logging results if needed\ndb = SQL_Interface(dev_mode)\n# name of table to insert data into\ntable = 'Coldstart'\n# =====================================================================================\n# set meta data for experiment\n# UUID from experiment\nexperiment_uuid = benchmarker.experiment.uuid\n\n# what function to test on (1-3)\nfx = 'function2'\nnested_fx = 'function3'\n\n# sleep for 65 minutes to ensure a cold start\nif not dev_mode:\n time.sleep(65*60)\n\n# values used for aborting experiment if it runs more than 24 hours\n_timeout = 24 * 60 * 60\nstart_time = time.time()\n\n\n# time to sleep in between invocations, starting at 1 minute\nsleep_time = 60\n# increment for each iteration\nincrement = sleep_time\n# granularity of result\ngranularity = 20\n# identifier of the instance seen on the last cold invocation\ncold_identifier = None\n# flags for controlling granularity of sleep value\nlarge_increment = True\nminute_increment = True\n\n# experiment-specific results gathered and logged by the logic of this experiment\nresults = []\n\n# sift away errors at runtime and report them later\nerrors = []\n# ======================================================================================\n# Convenience methods needed for this experiment\n\ninvoke_1_nested = [\n {\n \"function_name\": f\"{experiment_name}-{nested_fx}\",\n \"invoke_payload\": {}\n }\n ]\n\n# invoke function and return the result dict\ndef invoke(args:dict):\n response = benchmarker.invoke_function(function_name=fx,\n function_args= {'invoke_nested': args})\n if 'error' in response:\n return errors.append(response)\n nested_dict = response[list(response.keys())[1]]\n pprint(nested_dict)\n return nested_dict if 'error' not in nested_dict else errors.append(nested_dict)\n\n\n# the wrapper ends the experiment if it cannot return a valid value\ndef err_func(): return benchmarker.end_experiment()\n\n# convenience for not having to repeat values\ndef validate(x, y, z=None): return lib.iterator_wrapper(\n x, y, experiment_name, z, err_func)\n\n\n# parse data that needs to be logged to the database.\ndef append_result(\n invo_id,\n minutes,\n seconds,\n granularity,\n cold,\n final,\n multithreaded=False) -> None:\n\n results.append({\n 'exp_id': experiment_uuid,\n 'invo_id': invo_id,\n 'minutes': minutes,\n 'seconds': seconds,\n 'granularity': granularity,\n 'threads':1,\n 'benchmark': 0.0,\n 'cold': cold,\n 'final': final\n })\n\n# =====================================================================================\n# The actual 
logic of the experiment\n\n# function to be given to validate function if not successful\n# if other action is desired give other function as body\ndef err_func(): benchmarker.end_experiment()\n\n# convenience for not having to repeat values\n# x = function to apply, y:str = context, z = arguments for x, if any\ndef validate(x, y, z=None): return lib.iterator_wrapper(\n x, y, experiment_name, z, err_func)\n\n# =====================================================================================\n\ntry:\n\n def find_cold_instance(sleep):\n from functools import reduce\n\n print(f'finding_cold_instance with {sleep} sleep time')\n\n iterations = 50\n response_times = []\n\n time.sleep(sleep)\n\n # should be a cold invocation\n first_res = validate(invoke, 'first invocation from find_cold_instance', invoke_1_nested)\n cold_latency = first_res['execution_start'] - first_res['invocation_start']\n\n if verbose:\n print('first cold invocation:')\n pprint(first_res)\n print()\n\n if verbose:\n print(f'invoking function {iterations} times to find an average latency')\n\n for i in range(iterations):\n t1 = time.time()\n res = validate(invoke, 'invoking from warmtime baseline', invoke_1_nested)\n t2 = time.time()\n response_times.append(\n (i, res['execution_start']-res['invocation_start'], t2-t1)\n )\n\n response_times.sort(key=lambda x: x[1])\n\n sliced = response_times[10:40]\n\n sliced_avg = reduce(lambda x,y: x+y[1],[0.0] + sliced)/len(sliced)\n\n if cold_latency > sliced_avg * 2:\n return (first_res['instance_identifier'])\n\n elif sleep >= 7200:\n print('Aborting after trying to find cold instance for 2 hours')\n\n benchmarker.end_experiment()\n # log experiment-specific results, i.e. results not obtainable from the generic Invocation object\n lib.log_experiment_specifics(experiment_name,\n experiment_uuid, \n len(errors), \n db.log_exp_result([lib.dict_to_query(x, table) for x in results]))\n sys.exit()\n\n else:\n return find_cold_instance(sleep+1200)\n\n\n def set_cold_values():\n global sleep_time, increment, granularity, cold_identifier, large_increment, minute_increment\n # global sleep_time, increment, granularity, latest_latency_time, large_increment, minute_increment\n while(True):\n if time.time() - start_time > _timeout:\n print('ABORTING due to 24 hour time constraint from set_cold_values function\\n')\n benchmarker.end_experiment()\n # log experiment-specific results, i.e. results not obtainable from the generic Invocation object\n lib.log_experiment_specifics(experiment_name,\n experiment_uuid, \n len(errors), \n db.log_exp_result([lib.dict_to_query(x, table) for x in results]))\n sys.exit()\n\n time.sleep(sleep_time)\n result_dict = validate(invoke,f'invoking function: {fx} from cold start experiment',invoke_1_nested)\n local_identifier = result_dict['instance_identifier'] \n\n if(verbose):\n lib.dev_mode_print('logging time from set_cold_values', [\n ('experiment_uuid,result_dict[\\'instance_identifier\\']',experiment_uuid, result_dict['instance_identifier']),\n ('sleep_time / 60', int(sleep_time / 60)),\n ('sleep_time % 60', int( sleep_time % 60)),\n ('increment', increment),\n ('cold_identifier', cold_identifier),\n ('Final result', False),\n ('cold instance found',local_identifier != cold_identifier)\n ])\n if(local_identifier == cold_identifier):\n sleep_time += increment \n elif large_increment: \n sleep_time -= increment\n large_increment = False\n increment = granularity\n sleep_time += increment \n cold_identifier = local_identifier\n else:\n append_result(\n 
result_dict['identifier'],\n int(sleep_time / 60),\n int(sleep_time % 60),\n increment,\n local_identifier != cold_identifier,\n False)\n \n return\n\n def verify_result():\n global sleep_time, granularity, cold_identifier\n # verify that the result is valid by using the same sleep time between invocations 5 times\n iter_count = 5 if not dev_mode else 2\n while(iter_count > 0):\n if time.time() - start_time > _timeout:\n print('ABORTING due to 24 hour time constraint from verification loop\\n')\n benchmarker.end_experiment()\n # log experiment-specific results, i.e. results not obtainable from the generic Invocation object\n lib.log_experiment_specifics(experiment_name,\n experiment_uuid, \n len(errors), \n db.log_exp_result([lib.dict_to_query(x, table) for x in results]))\n sys.exit()\n \n time.sleep(sleep_time)\n result_dict = validate(invoke, f'invoking function: {fx} from validation of cold start experiment',invoke_1_nested)\n local_identifier = result_dict['instance_identifier']\n\n if(verbose):\n lib.dev_mode_print(f'logging cold time: {local_identifier != cold_identifier} -> coldtime exp', [\n ('experiment_uuid, result_dict[instance_identifier]',experiment_uuid, result_dict['instance_identifier']),\n ('sleep_time / 60', int(sleep_time / 60)),\n ('sleep_time % 60', int(sleep_time % 60)),\n ('increment', increment),\n ('cold identifier', cold_identifier),\n ('Final result', False),\n ('cold instance found',local_identifier != cold_identifier)\n ])\n \n append_result(\n result_dict['identifier'],\n int(sleep_time / 60),\n int(sleep_time % 60),\n increment,\n local_identifier != cold_identifier,\n False)\n\n\n if(local_identifier == cold_identifier):\n sleep_time += granularity\n iter_count = 5 if not dev_mode else 2\n else:\n iter_count -= 1\n cold_identifier = local_identifier\n \n # run one last time and log the result as final\n time.sleep(sleep_time)\n\n result_dict = validate(invoke, f'invoking function: {fx} from final invocation of cold start experiment',invoke_1_nested)\n identifier = result_dict['instance_identifier'] \n if identifier == cold_identifier:\n # log final result\n append_result(\n result_dict['identifier'],\n int(sleep_time / 60),\n int(sleep_time % 60),\n granularity,\n True,\n True)\n else:\n sleep_time += granularity\n cold_identifier = identifier\n verify_result() \n\n\n # RUN THE EXPERIMENT LOGIC\n\n cold_identifier = find_cold_instance(0.0)\n\n set_cold_values()\n\n verify_result()\n\n # =====================================================================================\n # end of the experiment, results are logged to database\n benchmarker.end_experiment()\n # =====================================================================================\n # log experiment-specific results, i.e. results not obtainable from the generic Invocation object\n lib.log_experiment_specifics(experiment_name,\n experiment_uuid, \n len(errors), \n db.log_exp_result([lib.dict_to_query(x, table) for x in results]))\n\nexcept Exception as e:\n # this will print to the logfile\n print(f'Ending experiment {experiment_name} due to fatal runtime error')\n print(str(datetime.now()))\n print('Error message: ', str(e))\n print('Trace: {0}'.format(traceback.format_exc()))\n print('-----------------------------------------')\n benchmarker.end_experiment()\n", "sub_path": "experiments/coldstart-identifier-nested/coldstart-identifier-nested.py", "file_name": "coldstart-identifier-nested.py", "file_ext": "py", "file_size_in_byte": 14144, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 35, "usage_type": "attribute"}, {"api_name": "benchmarker.Benchmarker", "line_number": 54, "usage_type": "call"}, {"api_name": "mysql_interface.SQL_Interface", "line_number": 64, "usage_type": "call"}, {"api_name": "benchmarker.experiment", "line_number": 70, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 78, "usage_type": "call"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "benchmarker.invoke_function", "line_number": 114, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 119, "usage_type": "call"}, {"api_name": "benchmarker.end_experiment", "line_number": 124, "usage_type": "call"}, {"api_name": "function_lib.iterator_wrapper", "line_number": 127, "usage_type": "call"}, {"api_name": "benchmarker.end_experiment", "line_number": 158, "usage_type": "call"}, {"api_name": "function_lib.iterator_wrapper", "line_number": 162, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 177, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 185, "usage_type": "call"}, {"api_name": "time.time", "line_number": 192, "usage_type": "call"}, {"api_name": "time.time", "line_number": 194, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 203, "usage_type": "call"}, {"api_name": "benchmarker.end_experiment", "line_number": 211, "usage_type": "call"}, {"api_name": "function_lib.log_experiment_specifics", "line_number": 213, "usage_type": "call"}, {"api_name": "function_lib.dict_to_query", "line_number": 216, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 217, "usage_type": "call"}, {"api_name": "time.time", "line_number": 227, "usage_type": "call"}, {"api_name": "benchmarker.end_experiment", "line_number": 229, "usage_type": "call"}, {"api_name": "function_lib.log_experiment_specifics", "line_number": 231, "usage_type": "call"}, {"api_name": "function_lib.dict_to_query", "line_number": 234, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 235, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 237, "usage_type": "call"}, {"api_name": "function_lib.dev_mode_print", "line_number": 242, "usage_type": "call"}, {"api_name": "time.time", "line_number": 275, "usage_type": "call"}, {"api_name": "benchmarker.end_experiment", "line_number": 277, "usage_type": "call"}, {"api_name": "function_lib.log_experiment_specifics", "line_number": 279, "usage_type": "call"}, {"api_name": "function_lib.dict_to_query", "line_number": 282, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 283, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 285, "usage_type": "call"}, {"api_name": "function_lib.dev_mode_print", "line_number": 290, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 317, "usage_type": "call"}, {"api_name": "benchmarker.end_experiment", "line_number": 346, "usage_type": "call"}, {"api_name": "function_lib.log_experiment_specifics", "line_number": 349, "usage_type": "call"}, {"api_name": 
"function_lib.dict_to_query", "line_number": 352, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 357, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 357, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 359, "usage_type": "call"}, {"api_name": "benchmarker.end_experiment", "line_number": 361, "usage_type": "call"}]} +{"seq_id": "220079268", "text": "import cv2 # state of the art computer vision algorithms library\nimport numpy as np # fundamental package for scientific computing\nimport matplotlib.pyplot as plt # 2D plotting library producing publication quality figures\nimport pyrealsense2 as rs # Intel RealSense cross-platform open-source API\nprint(\"Environment Ready\")\n\ndef filtering(depth_frame):\n\n decimation = rs.decimation_filter() #Decimation reduces the number of samples so it reduces the amount of data that is metered and reduces the processing time\n #Need a fast enough implementation because the robot will interact with humans in real-time\n decimation.set_option(rs.option.filter_magnitude, 4)\n spatial = rs.spatial_filter() #Spatial Filtering. Spatial filtering is an image processing technique for changing the intensities \n # of a pixel according to the intensities of the neighboring pixels.\n spatial.set_option(rs.option.filter_magnitude, 5)\n spatial.set_option(rs.option.filter_smooth_alpha, 1)\n spatial.set_option(rs.option.filter_smooth_delta, 50)\n spatial.set_option(rs.option.holes_fill, 3)\n hole_filling = rs.hole_filling_filter() #Fill image regions with holes\n temporal = rs.temporal_filter()\n\n depth_to_disparity = rs.disparity_transform(True)\n disparity_to_depth = rs.disparity_transform(False)\n\n frame = depth_frame\n frame = decimation.process(frame)\n frame = depth_to_disparity.process(frame)\n frame = spatial.process(frame)\n frame = temporal.process(frame)\n frame = disparity_to_depth.process(frame) #Disparity filter can sufficiently reduce the network without destroying the multi-scale nature of the network.\n depth_frame = hole_filling.process(frame)\n\n return depth_frame\n\ndef colorSegementation(color, xmin, xmax, ymin, ymax):\n\n # Estrella red color\n hsv_frame = cv2.cvtColor(color, cv2.COLOR_RGB2HSV) # HSV separates luma, or the image intensity, from chroma or the color information\n # separate color components from intensity for robustness to lighting changes, or removing shadows.\n mask1 = cv2.inRange(hsv_frame, (0,50,20), (5,255,255))\n mask2 = cv2.inRange(hsv_frame, (175,50,20), (180,255,255))\n\n ## Merge the mask and crop the red regions\n masked = cv2.bitwise_or(mask1, mask2)\n red = cv2.bitwise_and(color, color, mask=masked)\n\n indices = np.where(red != [0])\n\n avg_x_ = None\n avg_y_ = None\n counter = 0\n if len(indices[0]) and len(indices[1]):\n avg_x_ = 0\n avg_y_ = 0\n counter = 0\n\n for y in indices[0]:\n if (y >= int(ymin) and y <= int(ymax)):\n\n avg_y_ += y\n counter += 1\n try:\n avg_y_ /= counter\n except: \n pass\n counter = 0\n for x in indices[1]:\n if (x >= int(xmin) and x <= int(xmax)):\n\n avg_x_ += x\n counter += 1\n try:\n avg_x_ /= counter\n except:\n pass\n #print(\"c:\",counter)\n \n if (avg_x_ >= int(xmin) and avg_x_ <= int(xmax)) and (avg_y_ >= int(ymin) and avg_y_ <= int(ymax)) and counter >= 3000: #counter is a hyperparameter\n \n return \"Estrella\"\n else:\n return \"Unknown\"\n\n# Setup:\npipe = rs.pipeline()\n\nconfig = rs.config()\nconfig.enable_stream(rs.stream.color, 424, 240, rs.format.rgb8, 
30)\nconfig.enable_stream(rs.stream.depth, 424, 240, rs.format.z16, 30)\n#config.enable_stream(rs.stream.infrared, 1)\nconfig.enable_stream(rs.stream.infrared, 2)\nprofile = pipe.start(config)\n\n\n\n# Skip 15 first frames to give the Auto-Exposure time to adjust\nfor x in range(15):\n pipe.wait_for_frames()\n\n\n \n# Store next frameset for later processing:\nframeset = pipe.wait_for_frames()\ncolor_frame = frameset.get_color_frame()\ndepth_frame = frameset.get_depth_frame()\n\ndepth_frame = filtering(depth_frame)\n\n# Cleanup:\npipe.stop()\nprint(\"Frames Captured\")\n\ncolor = np.asanyarray(color_frame.get_data())\nplt.rcParams[\"axes.grid\"] = False\nplt.rcParams['figure.figsize'] = [12, 6]\nplt.imshow(color)\nplt.show()\ncolorizer = rs.colorizer()\ncolorized_depth = np.asanyarray(colorizer.colorize(depth_frame).get_data())\nplt.imshow(colorized_depth)\n\n# Create alignment primitive with color as its target stream:\nalign = rs.align(rs.stream.color)\nframeset = align.process(frameset)\n\n# Update color and depth frames:\naligned_depth_frame = frameset.get_depth_frame()\ncolorized_depth = np.asanyarray(colorizer.colorize(aligned_depth_frame).get_data())\n\n# Show the two frames together:\nimages = np.hstack((color, colorized_depth))\nplt.imshow(images)\nplt.show()\n\n# Standard OpenCV boilerplate for running the net:\nheight, width = color.shape[:2] #240, 424\n\nexpected = 300\naspect = width / height\nresized_image = cv2.resize(color, (round(expected * aspect), expected))\ncrop_start = round(expected * (aspect - 1) / 2)\ncrop_img = resized_image[0:expected, crop_start:crop_start+expected]\n\narg1 = \"MobileNetSSD_deploy.prototxt.txt\"\narg2 = \"MobileNetSSD_deploy.caffemodel\"\nnet = cv2.dnn.readNetFromCaffe(arg1, arg2)\ninScaleFactor = 0.007843\nmeanVal = 127.53\nclassNames = (\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n \"bottle\", \"bus\", \"car\", \"cat\", \"chair\",\n \"cow\", \"diningtable\", \"dog\", \"horse\",\n \"motorbike\", \"person\", \"pottedplant\",\n \"sheep\", \"sofa\", \"train\", \"tvmonitor\")\n\nblob = cv2.dnn.blobFromImage(crop_img, inScaleFactor, (expected, expected), meanVal, False)\nnet.setInput(blob, \"data\")\ndetections = net.forward(\"detection_out\")\n\nresults = []\nfor i in np.arange(0, detections.shape[2]):\n idx = int(detections[0, 0, i, 1])\n #print(classNames[idx])\n if classNames[idx] == \"bottle\":\n #continue\n\n label = detections[0,0,i,1]\n conf = detections[0,0,i,2]\n xmin = detections[0,0,i,3]\n ymin = detections[0,0,i,4]\n xmax = detections[0,0,i,5]\n ymax = detections[0,0,i,6]\n\n className = classNames[int(label)]\n\n cv2.rectangle(crop_img, (int(xmin * expected), int(ymin * expected)), \n (int(xmax * expected), int(ymax * expected)), (255, 255, 255), 2)\n cv2.putText(crop_img, className, \n (int(xmin * expected), int(ymin * expected) - 5),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (255,255,255))\n\n plt.imshow(crop_img)\n plt.show()\n\n scale = height / expected\n xmin_depth = int((xmin * expected + crop_start) * scale)\n ymin_depth = int((ymin * expected) * scale)\n xmax_depth = int((xmax * expected + crop_start) * scale)\n ymax_depth = int((ymax * expected) * scale)\n xmin_depth,ymin_depth,xmax_depth,ymax_depth\n cv2.rectangle(colorized_depth, (xmin_depth, ymin_depth), \n (xmax_depth, ymax_depth), (255, 255, 255), 2)\n plt.imshow(colorized_depth)\n plt.show()\n\n x_depth_center = 0.5 * (xmax_depth + xmin_depth)\n y_depth_center = 0.5 * (ymax_depth + ymin_depth)\n\n depth = np.asanyarray(aligned_depth_frame.get_data())\n # Crop 
depth data:\n depth = depth[xmin_depth:xmax_depth,ymin_depth:ymax_depth].astype(float)\n\n # Get data scale from the device and convert to meters\n #depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()\n #depth = depth * depth_scale\n #dist,_,_,_ = cv2.mean(depth)\n dist = aligned_depth_frame.get_distance(int(x_depth_center), int(y_depth_center))\n\n\n #avg_x = 0.5 * (xmin * (width/expected) + xmax * (width/expected))\n #avg_y = 0.5 * (ymin * (height/expected) + ymax * (height/expected))\n #print(avg_x, avg_y)\n depth_intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics\n depth = aligned_depth_frame.get_distance(int(x_depth_center), int(y_depth_center))\n realx, realy, realz = rs.rs2_deproject_pixel_to_point(depth_intrin, [int(x_depth_center),int(y_depth_center)],depth)\n\n objectType = colorSegementation(crop_img,int(xmin * expected), int(xmax * expected), int(ymin * expected), int(ymax * expected))\n results.append((realx, realy, realz, objectType, className))\n\nfor item in results:\n print(\"Detected a {0} of type {4} at (x, y, z) : {1:.3}, {2:.3}, {3:.3}.\".format(item[4], item[0], item[1], item[2], item[3]))\n\n\n#The detections are in the list 'results' in the form of '(x, y, z, drinkType, className)'", "sub_path": "objectDetection_final.py", "file_name": "objectDetection_final.py", "file_ext": "py", "file_size_in_byte": 8576, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pyrealsense2.decimation_filter", "line_number": 9, "usage_type": "call"}, {"api_name": "pyrealsense2.option", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pyrealsense2.spatial_filter", "line_number": 12, "usage_type": "call"}, {"api_name": "pyrealsense2.option", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pyrealsense2.option", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pyrealsense2.option", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pyrealsense2.option", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pyrealsense2.hole_filling_filter", "line_number": 18, "usage_type": "call"}, {"api_name": "pyrealsense2.temporal_filter", "line_number": 19, "usage_type": "call"}, {"api_name": "pyrealsense2.disparity_transform", "line_number": 21, "usage_type": "call"}, {"api_name": "pyrealsense2.disparity_transform", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2HSV", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.bitwise_or", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 46, "usage_type": "call"}, {"api_name": "pyrealsense2.pipeline", "line_number": 84, "usage_type": "call"}, {"api_name": "pyrealsense2.config", "line_number": 86, "usage_type": "call"}, {"api_name": "pyrealsense2.stream", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pyrealsense2.format", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pyrealsense2.stream", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pyrealsense2.format", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pyrealsense2.stream", "line_number": 90, "usage_type": "attribute"}, {"api_name": 
"numpy.asanyarray", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 113, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 114, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "pyrealsense2.colorizer", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.asanyarray", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "pyrealsense2.align", "line_number": 122, "usage_type": "call"}, {"api_name": "pyrealsense2.stream", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.asanyarray", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "cv2.resize", "line_number": 139, "usage_type": "call"}, {"api_name": "cv2.dnn.readNetFromCaffe", "line_number": 145, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 145, "usage_type": "attribute"}, {"api_name": "cv2.dnn.blobFromImage", "line_number": 154, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 154, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 159, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 174, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 176, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 178, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "cv2.rectangle", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "numpy.asanyarray", "line_number": 197, "usage_type": "call"}, {"api_name": "pyrealsense2.rs2_deproject_pixel_to_point", "line_number": 213, "usage_type": "call"}]} +{"seq_id": "327891867", "text": "from django.db import models\nimport pymysql as MySQLdb1\nimport pymysql\nfrom django.shortcuts import render\n\n\ndata={\n 'books':[]\n}\n\n# Create your models here.\nclass Connection:\n def __init__(self, user, password, db, host='127.0.0.1'):\n self.user = user\n self.host 
= host\n self.password = password\n self.db = db\n self.charset = \"utf8\"\n self._connection = None\n\n @property\n def connection(self):\n return self._connection\n\n def __enter__(self):\n self.connect()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.disconnect()\n\n def connect(self):\n\n if not self._connection:\n self._connection = MySQLdb1.connect(\n user = self.user,\n host = self.host,\n passwd = self.password,\n db = self.db,\n )\n return self._connection\n def disconnect(self):\n if self._connection:\n self._connection.close()\n\n\nclass Book:\n def __init__(self,db_connection,id=1,name=\"a\",author=\"a\",description=\"a\"):\n self.db_connection=db_connection#.connection()\n self.id=id\n self.name=name\n self.author=author\n self.description=description\n #self.c=self.db_connection.cursor(pymysql.cursors.DictCursor)\n #self.c = self.db_connection.cursor()\n\n def save(self):\n c = self.db_connection.cursor(pymysql.cursors.DictCursor)\n #c.execute(\"DELETE FROM Books WHERE id>0\")\n c.execute(\"\"\"INSERT INTO Books\n (id,name, author, description)\n VALUES\n (%s,%s, %s, %s)\"\"\",\n #(%s,%s, %s, %s)\"\"\",\n (self.id,self.name, self.author, self.description)\n )\n\n self.db_connection.commit()\n c.close()\n def _get_all(self):\n c = self.db_connection.cursor(pymysql.cursors.DictCursor)\n c.execute(\"SELECT * FROM Books\")\n return c.fetchall()\n def _get_one(self,req_id):\n c = self.db_connection.cursor(pymysql.cursors.DictCursor)\n c.execute(\"SELECT * FROM Books WHERE id = %s\", (req_id,)) # parameterized query avoids SQL injection\n return c.fetchall()\n def _clear_all(self,request):\n c = self.db_connection.cursor(pymysql.cursors.DictCursor)\n c.execute(\"DELETE FROM Books\")\n #c.execute(\"UPDATE Books\")\n c.execute(\"SELECT * FROM Books\")\n self.db_connection.commit()\n\n check_all=c.fetchall()\n #check_all=c.self._get_all()\n data['books']=[]\n for check in check_all:\n data['books'].append(check)\n c.close()\n return render(request, 'books.html',data)\n\n\nclass Shop(models.Model):\n name=models.CharField(max_length=20)\n\n\n'''\ncon = Connection(\"root\",\"toshiba19\",\"lab6\")\ndb_connection=con.connect()\nb=Book(db_connection,1,\"Fan\", \"Asic Asimov\", \"simple\")\nb.save()\nb=Book(db_connection,2,\"Fan\", \"Asic Asimov\", \"simple\")\nb.save()\nfor i in b._get_all():\n print(i)\n\nprint('aaaa')\n\nfor i in b._get_one(2):\n print(i)\n\n'''", "sub_path": "lab6_last/lab6_last/app1/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 3012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pymysql.connect", "line_number": 34, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pymysql.cursors", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pymysql.cursors", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pymysql.cursors", "line_number": 78, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 93, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 93, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 94, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "187603136", "text": "from __future__ import absolute_import\n\nimport re\n\nimport pygments\nfrom pygments.filter import apply_filters\nfrom 
pygments.filter import Filter\nfrom pygments.formatters import HtmlFormatter\nfrom pygments.formatters import LatexFormatter\nfrom pygments.lexer import bygroups\nfrom pygments.lexer import RegexLexer\nfrom pygments.lexer import using\nfrom pygments.lexer import words\nfrom pygments.lexers import PythonConsoleLexer\nfrom pygments.lexers import PythonLexer\nfrom pygments.lexers import SqlLexer\nfrom pygments.token import Keyword\nfrom pygments.token import Token\nfrom sphinx import highlighting\nfrom sphinx.highlighting import PygmentsBridge\n\n\ndef _strip_trailing_whitespace(iter_):\n buf = list(iter_)\n if buf:\n buf[-1] = (buf[-1][0], buf[-1][1].rstrip())\n for t, v in buf:\n yield t, v\n\n\nclass RealWorldSQLLexer(SqlLexer):\n tokens = {k: l[:] for (k, l) in SqlLexer.tokens.items()}\n\n tokens[\"root\"].insert(0, (words((\"RETURNING\",), suffix=r\"\\b\"), Keyword))\n\n\nclass StripDocTestFilter(Filter):\n def filter(self, lexer, stream):\n for ttype, value in stream:\n if (\n ttype is Token.Comment or ttype.parent is Token.Comment\n ) and re.match(r\"#\\s*doctest:\", value):\n continue\n yield ttype, value\n\n\nclass DetectAnnotationsFilter(Filter):\n def filter(self, lexer, stream):\n first, second = None, None\n found_colon = False\n should_report = False\n annotated = None\n found_sql = False\n\n for ttype, value in stream:\n\n # any encounting of SQL blocks, stop immediately. This is\n # likely not a class def example and we don't want the\n # \"anno/non-anno\" label to appear under SQL boxes at all\n if ttype is Token.Name and value in (\n \"execsql\",\n \"printsql\",\n \"opensql\",\n \"sqlpopup\",\n ):\n found_sql = True\n should_report = False\n\n if found_sql:\n yield ttype, value\n continue\n\n if ttype is Token.Name.Builtin:\n ttype = Token.Name\n\n if ttype is Token.Keyword and value == \"class\":\n should_report = True\n\n first = second\n second = ttype, value\n\n yield ttype, value\n\n if annotated:\n continue\n elif annotated is None and ttype is not Token.Text:\n annotated = False\n\n if (first, second) == ARROW_ANNOTATION:\n annotated = True\n elif found_colon:\n if (ttype, value) == NEWLINE:\n found_colon = False\n elif ttype == Token.Name:\n found_colon = False\n annotated = True\n elif first and ((first[0:1], second) == COLON_ANNOTATION):\n found_colon = True\n # should_report = True\n\n # report only on examples that have class defs\n if annotated is not None and should_report:\n yield Token.Other, f\"pep484 annotations detected: {annotated}\"\n\n\nclass PyConWithSQLLexer(RegexLexer):\n name = \"PyCon+SQL\"\n aliases = [\"pycon+sql\"]\n\n flags = re.IGNORECASE | re.DOTALL\n\n tokens = {\n \"root\": [\n (r\"{sql}\", Token.Sql.Link, \"sqlpopup\"),\n (r\"{execsql}\", Token.Sql.Exec, \"execsql\"),\n (r\"{opensql}\", Token.Sql.Exec, \"opensql\"), # alias of execsql\n (r\"{printsql}\", Token.Sql.Print, \"printsql\"),\n (r\".*?\\n\", using(PythonConsoleLexer)),\n ],\n \"sqlpopup\": [\n (\n r\"(.*?\\n)((?:PRAGMA|BEGIN|WITH|SE\\.\\.\\.|SELECT|INSERT|\"\n \"DELETE|ROLLBACK|\"\n \"COMMIT|ALTER|UPDATE|CREATE|DROP|PRAGMA\"\n \"|DESCRIBE).*?(?:{stop}\\n?|$))\",\n bygroups(using(PythonConsoleLexer), Token.Sql.Popup),\n \"#pop\",\n )\n ],\n \"execsql\": [(r\".*?(?:{stop}\\n*|$)\", Token.Sql.ExecState, \"#pop\")],\n \"opensql\": [(r\".*?(?:{stop}\\n*|$)\", Token.Sql.ExecState, \"#pop\")],\n \"printsql\": [(r\".*?(?:{stop}\\n*|$)\", Token.Sql.PrintState, \"#pop\")],\n }\n\n\nclass PythonWithSQLLexer(RegexLexer):\n name = \"Python+SQL\"\n aliases = [\"python+sql\"]\n\n flags = 
re.IGNORECASE | re.DOTALL\n\n tokens = {\n \"root\": [\n (r\"{sql}\", Token.Sql.Link, \"sqlpopup\"),\n (r\"{execsql}\", Token.Sql.Exec, \"execsql\"),\n (r\"{opensql}\", Token.Sql.Exec, \"opensql\"), # alias of execsql\n (r\"{printsql}\", Token.Sql.Print, \"printsql\"),\n (r\".*?\\n\", using(PythonLexer)),\n ],\n \"sqlpopup\": [\n (\n r\"(.*?\\n)((?:PRAGMA|BEGIN|SELECT|INSERT|DELETE|ROLLBACK\"\n \"|COMMIT|ALTER|UPDATE|CREATE|DROP\"\n \"|PRAGMA|DESCRIBE).*?(?:{stop}\\n?|$))\",\n bygroups(using(PythonLexer), Token.Sql.Popup),\n \"#pop\",\n )\n ],\n \"execsql\": [(r\".*?(?:{stop}\\n*|$)\", Token.Sql.ExecState, \"#pop\")],\n \"opensql\": [(r\".*?(?:{stop}\\n*|$)\", Token.Sql.ExecState, \"#pop\")],\n \"printsql\": [(r\".*?(?:{stop}\\n*|$)\", Token.Sql.PrintState, \"#pop\")],\n }\n\n\nclass PopupSQLFormatter(HtmlFormatter):\n def _format_lines(self, tokensource):\n sql_lexer = RealWorldSQLLexer()\n\n formatter = HtmlFormatter(nowrap=True)\n buf = []\n for ttype, value in apply_filters(tokensource, [StripDocTestFilter()]):\n if ttype in Token.Sql:\n\n for t, v in HtmlFormatter._format_lines(self, iter(buf)):\n yield t, v\n buf = []\n\n if ttype in (Token.Sql.ExecState, Token.Sql.PrintState):\n class_ = (\n \"show_sql\"\n if ttype is Token.Sql.ExecState\n else \"show_sql_print\"\n )\n yield (\n 1,\n f\"
<div class='{class_}'>%s</div>
\"\n % pygments.highlight(\n re.sub(r\"(?:{stop}|\\n+)\\s*$\", \"\", value),\n sql_lexer,\n formatter,\n ),\n )\n elif ttype is Token.Sql.Link:\n yield 1, \"sql\"\n elif ttype is Token.Sql.Popup:\n yield (\n 1,\n \"\"\n % pygments.highlight(\n re.sub(r\"(?:{stop}|\\n+)$\", \"\", value),\n sql_lexer,\n formatter,\n ),\n )\n else:\n buf.append((ttype, value))\n\n for t, v in _strip_trailing_whitespace(\n HtmlFormatter._format_lines(self, iter(buf))\n ):\n yield t, v\n\n\nclass PopupLatexFormatter(LatexFormatter):\n def _filter_tokens(self, tokensource):\n for ttype, value in apply_filters(tokensource, [StripDocTestFilter()]):\n if ttype in Token.Sql:\n if ttype not in (\n Token.Sql.Link,\n Token.Sql.Exec,\n Token.Sql.Print,\n ):\n yield Token.Literal, re.sub(r\"{stop}\", \"\", value)\n else:\n continue\n else:\n yield ttype, value\n\n def format(self, tokensource, outfile):\n LatexFormatter.format(self, self._filter_tokens(tokensource), outfile)\n\n\nARROW_ANNOTATION = (\n (Token.Operator, \"-\"),\n (Token.Operator, \">\"),\n)\nCOLON_ANNOTATION = (\n (Token.Name,),\n (Token.Punctuation, \":\"),\n)\n\nNEWLINE = (Token.Text, \"\\n\")\n\n\nclass DetectAnnotationsFormatterMixin:\n annotated = None\n\n def _format_lines(self, tokensource):\n\n self.annotated = None\n\n def go(tokensource):\n for ttype, value in tokensource:\n if ttype is Token.Other and value.startswith(\n \"pep484 annotations detected:\"\n ):\n self.annotated = (\n value == \"pep484 annotations detected: True\"\n )\n continue\n\n yield ttype, value\n\n for level, tag in super()._format_lines(go(tokensource)):\n yield level, tag\n\n def _wrap_pre(self, inner):\n for level, tag in super()._wrap_pre(inner):\n yield level, tag\n\n if level == 0 and self.annotated is not None and tag == \"\":\n yield (\n 1,\n '
'\n if self.annotated\n else '
',\n )\n\n def _wrap_code(self, inner):\n\n for level, tag in super()._wrap_code(inner):\n yield level, tag\n\n if level == 0 and self.annotated is not None and tag == \"</code>\":\n yield (\n 1,\n '
'\n if self.annotated\n else '
',\n )\n\n\nclass AnnoPopupSQLFormatter(\n DetectAnnotationsFormatterMixin, PopupSQLFormatter\n):\n pass\n\n\ndef setup_formatters(app, config):\n if config.zzzeeksphinx_annotation_key:\n PygmentsBridge.html_formatter = AnnoPopupSQLFormatter\n filters = [DetectAnnotationsFilter()]\n else:\n PygmentsBridge.html_formatter = PopupSQLFormatter\n filters = []\n\n highlighting.lexers[\"sql\"] = RealWorldSQLLexer()\n\n highlighting.lexers[\"python\"] = highlighting.lexers[\n \"python3\"\n ] = PythonLexer(filters=filters)\n highlighting.lexers[\"pycon\"] = highlighting.lexers[\n \"pycon3\"\n ] = PythonConsoleLexer(filters=filters)\n highlighting.lexers[\"python+sql\"] = PythonWithSQLLexer(filters=filters)\n highlighting.lexers[\"pycon+sql\"] = PyConWithSQLLexer(filters=filters)\n\n PygmentsBridge.latex_formatter = PopupLatexFormatter\n\n\ndef setup(app):\n\n # pass lexer class instead of lexer instance\n app.add_lexer(\"pycon+sql\", PyConWithSQLLexer)\n app.add_lexer(\"python+sql\", PythonWithSQLLexer)\n\n app.add_config_value(\"zzzeeksphinx_annotation_key\", None, \"env\")\n\n app.connect(\"config-inited\", setup_formatters)\n", "sub_path": "zzzeeksphinx/sqlformatter.py", "file_name": "sqlformatter.py", "file_ext": "py", "file_size_in_byte": 10527, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pygments.lexers.SqlLexer", "line_number": 31, "usage_type": "name"}, {"api_name": "pygments.lexers.SqlLexer.tokens.items", "line_number": 32, "usage_type": "call"}, {"api_name": "pygments.lexers.SqlLexer.tokens", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygments.lexers.SqlLexer", "line_number": 32, "usage_type": "name"}, {"api_name": "pygments.lexer.words", "line_number": 34, "usage_type": "call"}, {"api_name": "pygments.token.Keyword", "line_number": 34, "usage_type": "name"}, {"api_name": "pygments.filter.Filter", "line_number": 37, "usage_type": "name"}, {"api_name": "pygments.token.Token.Comment", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 41, "usage_type": "name"}, {"api_name": "re.match", "line_number": 42, "usage_type": "call"}, {"api_name": "pygments.filter.Filter", "line_number": 47, "usage_type": "name"}, {"api_name": "pygments.token.Token.Name", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 60, "usage_type": "name"}, {"api_name": "pygments.token.Token.Name", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 73, "usage_type": "name"}, {"api_name": "pygments.token.Token.Name", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 74, "usage_type": "name"}, {"api_name": "pygments.token.Token.Keyword", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 76, "usage_type": "name"}, {"api_name": "pygments.token.Token.Text", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 86, "usage_type": "name"}, {"api_name": "pygments.token.Token.Name", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 94, "usage_type": "name"}, {"api_name": "pygments.token.Token.Other", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 103, "usage_type": "name"}, {"api_name": "pygments.lexer.RegexLexer", "line_number": 106, "usage_type": 
"name"}, {"api_name": "re.IGNORECASE", "line_number": 110, "usage_type": "attribute"}, {"api_name": "re.DOTALL", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pygments.token.Token.Sql", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 114, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 115, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 116, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 117, "usage_type": "name"}, {"api_name": "pygments.lexer.using", "line_number": 118, "usage_type": "call"}, {"api_name": "pygments.lexers.PythonConsoleLexer", "line_number": 118, "usage_type": "argument"}, {"api_name": "pygments.lexer.bygroups", "line_number": 126, "usage_type": "call"}, {"api_name": "pygments.lexer.using", "line_number": 126, "usage_type": "call"}, {"api_name": "pygments.lexers.PythonConsoleLexer", "line_number": 126, "usage_type": "argument"}, {"api_name": "pygments.token.Token.Sql", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 126, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 130, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 131, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 132, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 132, "usage_type": "name"}, {"api_name": "pygments.lexer.RegexLexer", "line_number": 136, "usage_type": "name"}, {"api_name": "re.IGNORECASE", "line_number": 140, "usage_type": "attribute"}, {"api_name": "re.DOTALL", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pygments.token.Token.Sql", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 144, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 145, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 146, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 146, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 147, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 147, "usage_type": "name"}, {"api_name": "pygments.lexer.using", "line_number": 148, "usage_type": "call"}, {"api_name": "pygments.lexers.PythonLexer", "line_number": 148, "usage_type": "argument"}, {"api_name": "pygments.lexer.bygroups", "line_number": 155, "usage_type": "call"}, {"api_name": "pygments.lexer.using", "line_number": 155, "usage_type": "call"}, {"api_name": "pygments.lexers.PythonLexer", "line_number": 155, "usage_type": "argument"}, {"api_name": "pygments.token.Token.Sql", "line_number": 155, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 155, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 159, "usage_type": "attribute"}, 
{"api_name": "pygments.token.Token", "line_number": 159, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 160, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 161, "usage_type": "name"}, {"api_name": "pygments.formatters.HtmlFormatter", "line_number": 165, "usage_type": "name"}, {"api_name": "pygments.formatters.HtmlFormatter", "line_number": 169, "usage_type": "call"}, {"api_name": "pygments.filter.apply_filters", "line_number": 171, "usage_type": "call"}, {"api_name": "pygments.token.Token.Sql", "line_number": 172, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 172, "usage_type": "name"}, {"api_name": "pygments.formatters.HtmlFormatter._format_lines", "line_number": 174, "usage_type": "call"}, {"api_name": "pygments.formatters.HtmlFormatter", "line_number": 174, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 178, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 178, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 181, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 181, "usage_type": "name"}, {"api_name": "pygments.highlight", "line_number": 187, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 188, "usage_type": "call"}, {"api_name": "pygments.token.Token.Sql", "line_number": 193, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 193, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 195, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 195, "usage_type": "name"}, {"api_name": "pygments.highlight", "line_number": 199, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 200, "usage_type": "call"}, {"api_name": "pygments.formatters.HtmlFormatter._format_lines", "line_number": 209, "usage_type": "call"}, {"api_name": "pygments.formatters.HtmlFormatter", "line_number": 209, "usage_type": "name"}, {"api_name": "pygments.formatters.LatexFormatter", "line_number": 214, "usage_type": "name"}, {"api_name": "pygments.filter.apply_filters", "line_number": 216, "usage_type": "call"}, {"api_name": "pygments.token.Token.Sql", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 217, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 219, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 219, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 220, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 220, "usage_type": "name"}, {"api_name": "pygments.token.Token.Sql", "line_number": 221, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 221, "usage_type": "name"}, {"api_name": "pygments.token.Token.Literal", "line_number": 223, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 223, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 223, "usage_type": "call"}, {"api_name": "pygments.formatters.LatexFormatter.format", "line_number": 230, "usage_type": "call"}, {"api_name": "pygments.formatters.LatexFormatter", "line_number": 230, "usage_type": "name"}, {"api_name": 
"pygments.token.Token.Operator", "line_number": 234, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 234, "usage_type": "name"}, {"api_name": "pygments.token.Token.Operator", "line_number": 235, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 235, "usage_type": "name"}, {"api_name": "pygments.token.Token.Name", "line_number": 238, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 238, "usage_type": "name"}, {"api_name": "pygments.token.Token.Punctuation", "line_number": 239, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 239, "usage_type": "name"}, {"api_name": "pygments.token.Token.Text", "line_number": 242, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 242, "usage_type": "name"}, {"api_name": "pygments.token.Token.Other", "line_number": 254, "usage_type": "attribute"}, {"api_name": "pygments.token.Token", "line_number": 254, "usage_type": "name"}, {"api_name": "sphinx.highlighting.PygmentsBridge.html_formatter", "line_number": 301, "usage_type": "attribute"}, {"api_name": "sphinx.highlighting.PygmentsBridge", "line_number": 301, "usage_type": "name"}, {"api_name": "sphinx.highlighting.PygmentsBridge.html_formatter", "line_number": 304, "usage_type": "attribute"}, {"api_name": "sphinx.highlighting.PygmentsBridge", "line_number": 304, "usage_type": "name"}, {"api_name": "sphinx.highlighting.lexers", "line_number": 307, "usage_type": "attribute"}, {"api_name": "sphinx.highlighting", "line_number": 307, "usage_type": "name"}, {"api_name": "sphinx.highlighting.lexers", "line_number": 309, "usage_type": "attribute"}, {"api_name": "sphinx.highlighting", "line_number": 309, "usage_type": "name"}, {"api_name": "pygments.lexers.PythonLexer", "line_number": 311, "usage_type": "call"}, {"api_name": "sphinx.highlighting.lexers", "line_number": 312, "usage_type": "attribute"}, {"api_name": "sphinx.highlighting", "line_number": 312, "usage_type": "name"}, {"api_name": "pygments.lexers.PythonConsoleLexer", "line_number": 314, "usage_type": "call"}, {"api_name": "sphinx.highlighting.lexers", "line_number": 315, "usage_type": "attribute"}, {"api_name": "sphinx.highlighting", "line_number": 315, "usage_type": "name"}, {"api_name": "sphinx.highlighting.lexers", "line_number": 316, "usage_type": "attribute"}, {"api_name": "sphinx.highlighting", "line_number": 316, "usage_type": "name"}, {"api_name": "sphinx.highlighting.PygmentsBridge.latex_formatter", "line_number": 318, "usage_type": "attribute"}, {"api_name": "sphinx.highlighting.PygmentsBridge", "line_number": 318, "usage_type": "name"}]} +{"seq_id": "582138022", "text": "# -*- coding: utf-8 -*-\n###############################################################################\n#\n# Copyright (c) 2019 HERE Europe B.V.\n#\n# SPDX-License-Identifier: MIT\n# License-Filename: LICENSE\n#\n###############################################################################\n\nimport logging\nimport time\n\nfrom qgis.core import QgsProject, QgsApplication\nfrom qgis.core import Qgis, QgsMessageLog\n\nfrom qgis.PyQt.QtCore import QCoreApplication, Qt\nfrom qgis.PyQt.QtGui import QIcon\nfrom qgis.PyQt.QtWidgets import QAction, QToolButton, QWidgetAction\nfrom qgis.PyQt.QtWidgets import QProgressBar, QSizePolicy\n\nfrom . import config\nfrom . 
import utils\n\nfrom .gui.space_dialog import ConnectManageSpaceDialog, ManageSpaceDialog\nfrom .gui.space_info_dialog import EditSpaceDialog, UploadNewSpaceDialog\nfrom .gui.util_dialog import ConfirmDialog\nfrom .gui.basemap_dialog import BaseMapDialog\n\nfrom .models import SpaceConnectionInfo, TokenModel, GroupTokenModel\nfrom .modules.controller import ChainController\nfrom .modules.controller import AsyncFun, parse_qt_args, make_qt_args, make_fun_args\nfrom .modules.controller.manager import ControllerManager\n\nfrom .modules import loader\nfrom .modules.space_loader import *\nfrom .modules.refactor_loader import *\n\nfrom .modules.layer.manager import LayerManager\nfrom .modules.layer import bbox_utils\n\nfrom .modules.network import NetManager\n\nfrom .modules import basemap\nfrom .modules.basemap.auth_manager import AuthManager\n\nfrom .modules.common.error import format_traceback\n\n\nPLUGIN_NAME = config.PLUGIN_NAME\nTAG_PLUGIN = \"XYZ Hub\"\n\nDEBUG = 1\n\nfrom .modules.common.signal import make_print_qgis, close_print_qgis\nprint_qgis = make_print_qgis(TAG_PLUGIN,debug=True)\n\n\nclass XYZHubConnector(object):\n\n \"\"\"base plugin\"\"\"\n\n def __init__(self, iface):\n \"\"\"init\"\"\"\n import sys\n print(sys.version)\n self.iface = iface\n self.web_menu = \"&XYZ Hub Connector\"\n self.init_modules()\n self.obj = self\n\n def initGui(self):\n \"\"\"startup\"\"\"\n\n parent = self.iface.mainWindow()\n\n ######## action, button\n\n icon = QIcon(\"%s/%s\" % (config.PLUGIN_DIR,\"images/xyz.png\"))\n icon_bbox = QIcon(\"%s/%s\" % (config.PLUGIN_DIR,\"images/bbox.svg\"))\n self.action_connect = QAction(icon, \"New XYZ Hub Connection\", parent)\n self.action_connect.setWhatsThis(\n QCoreApplication.translate(PLUGIN_NAME, \"WhatsThis message\" ))\n self.action_connect.setStatusTip(\n QCoreApplication.translate(PLUGIN_NAME, \"status tip message\" ))\n\n self.action_clear_cache = QAction(\"Clear cache\", parent)\n self.action_upload = QAction(\"Upload to New XYZ Geospace\", parent)\n self.action_basemap = QAction(\"Add HERE Map Tile\", parent)\n\n\n self.action_magic_sync = QAction(\"Magic Sync (EXPERIMENTAL)\", parent)\n self.action_manage = QAction(\"Manage XYZ Geospace (EXPERIMENTAL)\", parent)\n self.action_edit = QAction(\"Edit/Delete XYZ Geospace (EXPERIMENTAL)\", parent)\n\n if self.iface.activeLayer() is None:\n # self.action_upload.setEnabled(False)\n self.action_edit.setEnabled(False)\n self.action_magic_sync.setEnabled(False)\n\n # self.action_magic_sync.setVisible(False) # disable magic sync\n\n ######## CONNECT action, button\n\n self.action_connect.triggered.connect(self.open_connection_dialog)\n self.action_manage.triggered.connect(self.open_manage_dialog)\n self.action_edit.triggered.connect(self.open_edit_dialog)\n self.action_upload.triggered.connect(self.open_upload_dialog)\n self.action_magic_sync.triggered.connect(self.open_magic_sync_dialog)\n self.action_clear_cache.triggered.connect(self.open_clear_cache_dialog)\n self.action_basemap.triggered.connect(self.open_basemap_dialog)\n\n ######## Add the toolbar + button\n self.toolbar = self.iface.addToolBar(PLUGIN_NAME)\n self.toolbar.setObjectName(\"XYZ Hub Connector\")\n\n tool_btn = QToolButton(self.toolbar)\n\n self.actions = [self.action_connect, self.action_upload, self.action_basemap, self.action_clear_cache] # , self.action_magic_sync, self.action_manage, self.action_edit\n for a in self.actions:\n tool_btn.addAction(a)\n self.iface.addPluginToWebMenu(self.web_menu, a)\n\n 
tool_btn.setDefaultAction(self.action_connect)\n tool_btn.setPopupMode(tool_btn.MenuButtonPopup)\n\n self.xyz_widget_action = self.toolbar.addWidget(tool_btn)\n\n self.action_help = None\n \n self.action_reload = QAction(icon_bbox, \"Reload BBox\", parent)\n self.action_reload.triggered.connect(self.layer_reload_bbox)\n self.action_reload.setVisible(False) # disable\n self.toolbar.addAction(self.action_reload)\n\n progress = QProgressBar()\n progress.setMinimum(0)\n progress.setMaximum(0)\n progress.reset()\n progress.hide()\n # progress = self.iface.statusBarIface().children()[2] # will be hidden by qgis\n self.iface.statusBarIface().addPermanentWidget(progress)\n self.pb = progress\n\n def init_modules(self):\n # util.init_module()\n\n # parent = self.iface.mainWindow()\n parent = QgsProject.instance()\n\n ######## Init xyz modules\n self.map_basemap_meta = basemap.load_default_xml()\n self.auth_manager = AuthManager(config.USER_PLUGIN_DIR +\"/auth.ini\")\n \n self.token_model = GroupTokenModel(parent)\n # self.layer = LayerManager(parent, self.iface)\n\n self.network = NetManager(parent)\n \n self.con_man = ControllerManager()\n self.layer_man = LayerManager()\n\n ######## data flow\n self.conn_info = SpaceConnectionInfo()\n \n ######## token \n self.token_model.load_ini(config.USER_PLUGIN_DIR +\"/token.ini\")\n\n ######## CALLBACK\n \n self.con_man.ld_pool.signal.progress.connect( self.cb_progress_busy) #, Qt.QueuedConnection\n self.con_man.ld_pool.signal.finished.connect( self.cb_progress_done)\n \n QgsProject.instance().layersWillBeRemoved[\"QStringList\"].connect( self.layer_man.remove)\n QgsProject.instance().layersWillBeRemoved[\"QStringList\"].connect( self.con_man.remove)\n\n # self.iface.currentLayerChanged.connect( self.cb_layer_selected) # UNCOMMENT\n\n if DEBUG:\n QgsApplication.messageLog().messageReceived.connect(print_qgis)\n\n def unload_modules(self):\n # self.con_man.disconnect_ux( self.iface)\n QgsProject.instance().layersWillBeRemoved[\"QStringList\"].disconnect( self.layer_man.remove)\n QgsProject.instance().layersWillBeRemoved[\"QStringList\"].disconnect( self.con_man.remove)\n\n # utils.disconnect_silent(self.iface.currentLayerChanged)\n\n # self.iface.mapCanvas().extentsChanged.disconnect( self.debug_reload)\n\n close_print_qgis()\n pass\n def unload(self):\n \"\"\"teardown\"\"\"\n self.unload_modules()\n # remove the plugin menu item and icon\n self.iface.removePluginWebMenu(self.web_menu, self.action_help)\n\n self.toolbar.clear() # remove action from custom toolbar (toolbar still exist)\n self.toolbar.deleteLater()\n\n for a in self.actions:\n self.iface.removePluginWebMenu(self.web_menu, a)\n\n\n ############### \n # Callback\n ###############\n def cb_layer_selected(self, qlayer):\n flag_xyz = True if qlayer is not None and self.layer.is_xyz_supported_layer(qlayer) else False\n # disable magic sync\n # self.action_magic_sync.setEnabled(flag_xyz)\n flag_layer = True\n self.action_upload.setEnabled(flag_layer)\n self.action_edit.setEnabled(flag_layer)\n \n ############### \n # Callback of action (main function)\n ###############\n def cb_success_msg(self, msg, info=\"\"):\n self.iface.messageBar().pushMessage(\n msg, info, \n Qgis.Success, 1\n )\n\n def make_cb_success(self, msg, info=\"\"):\n def _cb_success_msg():\n txt = info\n self.cb_success_msg(msg, txt)\n return _cb_success_msg\n\n def cb_handle_error_msg(self, e):\n err = parse_exception_obj(e)\n if isinstance(err, ChainInterrupt):\n e0, idx = err.args[0:2]\n if isinstance(e0, 
net_handler.NetworkError):\n ok = self.show_net_err_dialog(e0)\n if ok: return\n elif isinstance(e0, loader.EmptyXYZSpaceError):\n ret = exec_warning_dialog(\"Warning\",\"Requested query returns no features\")\n self.show_err_msgbar(err)\n\n def show_net_err_dialog(self, err):\n assert isinstance(err, net_handler.NetworkError)\n reply_tag, status, reason, body = err.args[:4]\n if reply_tag in [\"count\"]: # too many error\n return 0\n \n msg = (\n \"%s: %s\\n\"%(status,reason) + \n \"There was a problem connecting to the server\"\n )\n if status == 403:\n msg += \"\\n\\n\" + \"Please make sure that the token has WRITE permission\"\n ret = exec_warning_dialog(\"Network Error\",msg, body)\n return 1\n\n def show_err_msgbar(self, err):\n self.iface.messageBar().pushMessage(\n TAG_PLUGIN, repr(err), \n Qgis.Warning, 5\n )\n msg = format_traceback(err)\n QgsMessageLog.logMessage( msg, TAG_PLUGIN, Qgis.Warning)\n\n def cb_progress_busy(self, n_active):\n if n_active > 1: return\n self.flag_pb_show=True\n self.cb_progress_refresh()\n\n def cb_progress_done(self):\n self.flag_pb_show=False\n self.cb_progress_refresh()\n\n def cb_progress_refresh(self):\n if not hasattr(self,\"flag_pb_show\"): return\n\n pb = self.pb\n if self.flag_pb_show:\n pb.show()\n print_qgis(\"show\",pb)\n else:\n pb.hide() \n print_qgis(\"hide\")\n \n ############### \n # Action (main function)\n ###############\n def load_bbox(self, con, args):\n bbox = bbox_utils.extend_to_bbox(bbox_utils.get_bounding_box(self.iface))\n a, kw = parse_qt_args(args)\n kw[\"bbox\"] = bbox\n kw[\"limit\"] = 1000\n con.start(*a, **kw)\n def layer_reload_bbox(self):\n con_bbox_reload = ReloadLayerController_bbox(self.network)\n self.con_man.add(con_bbox_reload)\n # con_bbox_reload.signal.finished.connect( self.refresh_canvas, Qt.QueuedConnection)\n con_bbox_reload.signal.finished.connect( self.make_cb_success(\"Bounding box loading finish\") )\n con_bbox_reload.signal.error.connect( self.cb_handle_error_msg )\n\n # TODO: set/get params from vlayer\n layer_id = self.iface.activeLayer().id()\n layer = self.layer_man.get(layer_id)\n self.load_bbox(con_bbox_reload, make_qt_args(layer))\n \n # UNUSED\n def refresh_canvas(self):\n # self.iface.activeLayer().triggerRepaint()\n self.iface.mapCanvas().refresh()\n def previous_canvas_extent(self):\n self.iface.mapCanvas().zoomToPreviousExtent()\n #\n \n def open_clear_cache_dialog(self):\n parent = self.iface.mainWindow()\n dialog = ConfirmDialog(parent, \"Delete cache will make loaded layer unusable !!\")\n ret = dialog.exec_()\n if ret != dialog.Ok: return\n \n utils.clear_cache()\n\n def open_connection_dialog(self):\n parent = self.iface.mainWindow()\n dialog = ConnectManageSpaceDialog(parent)\n dialog.config(self.token_model, self.conn_info)\n\n ############ edit btn \n\n con = EditSpaceController(self.network)\n self.con_man.add_background(con)\n con.signal.finished.connect( dialog.btn_use.clicked.emit )\n con.signal.error.connect( self.cb_handle_error_msg )\n dialog.signal_edit_space.connect( con.start_args)\n\n ############ delete btn \n\n con = DeleteSpaceController(self.network)\n self.con_man.add_background(con)\n con.signal.results.connect( dialog.btn_use.clicked.emit )\n con.signal.error.connect( self.cb_handle_error_msg )\n dialog.signal_del_space.connect( con.start_args)\n\n ############ Use Token btn \n \n con = LoadSpaceController(self.network)\n self.con_man.add(con)\n con.signal.results.connect( make_fun_args(dialog.cb_display_spaces) )\n con.signal.error.connect( 
self.cb_handle_error_msg )\n con.signal.error.connect( lambda e: dialog.cb_enable_token_ui() )\n con.signal.finished.connect( dialog.cb_enable_token_ui )\n dialog.signal_use_token.connect( con.start_args)\n\n ############ get statisitics \n con = StatSpaceController(self.network)\n self.con_man.add(con)\n con.signal.results.connect( make_fun_args(dialog.cb_display_space_count) )\n con.signal.error.connect( self.cb_handle_error_msg )\n dialog.signal_space_count.connect( con.start_args)\n \n ############ TODO: bbox btn \n\n ############ connect btn \n con_load = loader.ReloadLayerController(self.network, n_parallel=2)\n self.con_man.add_background(con_load)\n con_load.signal.finished.connect( self.make_cb_success(\"Loading finish\") )\n # con_load.signal.finished.connect( self.refresh_canvas, Qt.QueuedConnection)\n con_load.signal.error.connect( self.cb_handle_error_msg )\n\n dialog.signal_space_connect.connect( con_load.start_args)\n\n # con.signal.results.connect( self.layer_man.add_args) # IMPORTANT\n\n\n dialog.exec_()\n self.con_man.finish_fast()\n # self.startTime = time.time()\n\n def open_manage_dialog(self):\n pass\n\n def open_edit_dialog(self):\n pass\n\n def open_upload_dialog(self):\n vlayer = self.iface.activeLayer()\n parent = self.iface.mainWindow()\n dialog = UploadNewSpaceDialog(parent)\n dialog.config(self.token_model, self.network, vlayer)\n\n ############ Use Token btn\n con = LoadSpaceController(self.network)\n self.con_man.add(con)\n con.signal.results.connect( make_fun_args(dialog.cb_set_valid_token) ) # finished signal !?\n con.signal.error.connect( self.cb_handle_error_msg )\n con.signal.finished.connect( dialog.cb_enable_token_ui )\n dialog.signal_use_token.connect( con.start_args)\n\n\n con_upload = UploadLayerController(self.network, n_parallel=2)\n self.con_man.add_background(con_upload)\n con_upload.signal.finished.connect( self.make_cb_success(\"Uploading finish\") )\n con_upload.signal.error.connect( self.cb_handle_error_msg )\n\n con = InitUploadLayerController(self.network)\n self.con_man.add_background(con)\n\n dialog.signal_upload_new_space.connect( con.start_args)\n con.signal.results.connect( con_upload.start_args)\n con.signal.error.connect( self.cb_handle_error_msg )\n\n dialog.exec_()\n self.con_man.finish_fast()\n def open_magic_sync_dialog(self):\n pass\n\n def open_basemap_dialog(self):\n parent = self.iface.mainWindow()\n auth = self.auth_manager.get_auth()\n dialog = BaseMapDialog(parent)\n dialog.config(self.map_basemap_meta, auth)\n dialog.signal_add_basemap.connect( self.add_basemap_layer)\n\n dialog.exec_()\n def add_basemap_layer(self, args):\n a, kw = parse_qt_args(args)\n meta, app_id, app_code = a\n self.auth_manager.save(app_id, app_code)\n basemap.add_basemap_layer( meta, app_id, app_code)\n", "sub_path": "XYZHubConnector/plugin.py", "file_name": "plugin.py", "file_ext": "py", "file_size_in_byte": 15650, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "modules.common.signal.make_print_qgis", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.version", "line_number": 66, "usage_type": "attribute"}, {"api_name": "qgis.PyQt.QtGui.QIcon", "line_number": 79, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtGui.QIcon", "line_number": 80, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 81, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.QCoreApplication.translate", "line_number": 83, "usage_type": "call"}, {"api_name": 
"qgis.PyQt.QtCore.QCoreApplication", "line_number": 83, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtCore.QCoreApplication.translate", "line_number": 85, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtCore.QCoreApplication", "line_number": 85, "usage_type": "name"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 87, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 88, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 89, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 92, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 93, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 94, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QToolButton", "line_number": 117, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QAction", "line_number": 131, "usage_type": "call"}, {"api_name": "qgis.PyQt.QtWidgets.QProgressBar", "line_number": 136, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 149, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 149, "usage_type": "name"}, {"api_name": "modules.basemap.load_default_xml", "line_number": 152, "usage_type": "call"}, {"api_name": "modules.basemap", "line_number": 152, "usage_type": "name"}, {"api_name": "modules.basemap.auth_manager.AuthManager", "line_number": 153, "usage_type": "call"}, {"api_name": "models.GroupTokenModel", "line_number": 155, "usage_type": "call"}, {"api_name": "modules.network.NetManager", "line_number": 158, "usage_type": "call"}, {"api_name": "modules.controller.manager.ControllerManager", "line_number": 160, "usage_type": "call"}, {"api_name": "modules.layer.manager.LayerManager", "line_number": 161, "usage_type": "call"}, {"api_name": "models.SpaceConnectionInfo", "line_number": 164, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 174, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 174, "usage_type": "name"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 175, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 175, "usage_type": "name"}, {"api_name": "qgis.core.QgsApplication.messageLog", "line_number": 180, "usage_type": "call"}, {"api_name": "qgis.core.QgsApplication", "line_number": 180, "usage_type": "name"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 184, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 184, "usage_type": "name"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 185, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 185, "usage_type": "name"}, {"api_name": "modules.common.signal.close_print_qgis", "line_number": 191, "usage_type": "call"}, {"api_name": "qgis.core.Qgis.Success", "line_number": 223, "usage_type": "attribute"}, {"api_name": "qgis.core.Qgis", "line_number": 223, "usage_type": "name"}, {"api_name": "modules.loader.EmptyXYZSpaceError", "line_number": 239, "usage_type": "attribute"}, {"api_name": "modules.loader", "line_number": 239, "usage_type": "name"}, {"api_name": "qgis.core.Qgis.Warning", "line_number": 261, "usage_type": "attribute"}, {"api_name": "qgis.core.Qgis", "line_number": 261, "usage_type": "name"}, {"api_name": "modules.common.error.format_traceback", "line_number": 263, "usage_type": "call"}, {"api_name": "qgis.core.QgsMessageLog.logMessage", 
"line_number": 264, "usage_type": "call"}, {"api_name": "qgis.core.QgsMessageLog", "line_number": 264, "usage_type": "name"}, {"api_name": "qgis.core.Qgis.Warning", "line_number": 264, "usage_type": "attribute"}, {"api_name": "qgis.core.Qgis", "line_number": 264, "usage_type": "name"}, {"api_name": "modules.layer.bbox_utils.extend_to_bbox", "line_number": 290, "usage_type": "call"}, {"api_name": "modules.layer.bbox_utils", "line_number": 290, "usage_type": "name"}, {"api_name": "modules.layer.bbox_utils.get_bounding_box", "line_number": 290, "usage_type": "call"}, {"api_name": "modules.controller.parse_qt_args", "line_number": 291, "usage_type": "call"}, {"api_name": "modules.controller.make_qt_args", "line_number": 305, "usage_type": "call"}, {"api_name": "gui.util_dialog.ConfirmDialog", "line_number": 317, "usage_type": "call"}, {"api_name": "gui.space_dialog.ConnectManageSpaceDialog", "line_number": 325, "usage_type": "call"}, {"api_name": "modules.controller.make_fun_args", "line_number": 348, "usage_type": "call"}, {"api_name": "modules.controller.make_fun_args", "line_number": 357, "usage_type": "call"}, {"api_name": "modules.loader.ReloadLayerController", "line_number": 364, "usage_type": "call"}, {"api_name": "modules.loader", "line_number": 364, "usage_type": "name"}, {"api_name": "gui.space_info_dialog.UploadNewSpaceDialog", "line_number": 388, "usage_type": "call"}, {"api_name": "modules.controller.make_fun_args", "line_number": 394, "usage_type": "call"}, {"api_name": "gui.basemap_dialog.BaseMapDialog", "line_number": 420, "usage_type": "call"}, {"api_name": "modules.controller.parse_qt_args", "line_number": 426, "usage_type": "call"}, {"api_name": "modules.basemap.add_basemap_layer", "line_number": 429, "usage_type": "call"}, {"api_name": "modules.basemap", "line_number": 429, "usage_type": "name"}]} +{"seq_id": "332087991", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nclass Estimator:\n def calc_metrics(self, tar_scores, imp_scores):\n min_score = np.minimum(np.min(tar_scores), np.min(imp_scores))\n max_score = np.maximum(np.max(tar_scores), np.max(imp_scores))\n\n n_tar = len(tar_scores)\n n_imp = len(imp_scores)\n\n N = 100\n err = 0\n mink = float('inf')\n\n fars = np.zeros((N,))\n frrs = np.zeros((N,))\n dists = np.zeros((N,))\n\n for i, dist in enumerate(np.linspace(min_score, max_score, N)):\n far = len(np.where(imp_scores > dist)[0]) / n_imp\n frr = len(np.where(tar_scores < dist)[0]) / n_tar\n fars[i] = far\n frrs[i] = frr\n dists[i] = dist\n\n k = np.abs(far - frr)\n if k < mink:\n mink = k\n err = (far + frr) / 2\n\n return err, fars, frrs, dists\n\n\n def scores_by_uniform(self, protocol):\n scores = np.random.uniform(-1, 1, (protocol.shape[0], protocol.shape[0]))\n return scores[protocol], scores[np.logical_not(protocol)]\n\n\n def scores_by_dist(self, data_x, protocol):\n data_x = data_x / np.linalg.norm(data_x, axis=1)[:, np.newaxis]\n scores = data_x @ data_x.T\n return scores[protocol], scores[np.logical_not(protocol)]\n\n def plot_err(self, fars, frrs, eer):\n\n plt.figure()\n plt.loglog(fars, frrs)\n plt.xlabel('fars')\n plt.ylabel('frrs')\n\n x = fars[::-1]\n y = fars[::-1]\n frrs = frrs[::-1]\n\n idy = float('inf')\n idx = 0\n for i in range(1, len(x)):\n t = np.minimum(np.abs(y[i] - frrs[i]), idy)\n (idx, idy) = (i, t) if t < idy else (idx, idy)\n\n plt.plot(x, y)\n plt.plot(x[idx], y[idx], 'ro')\n plt.text(x[idx] + 0.1, y[idx] + 0.1, 'ERR = ' + str(round(x[idx], 2)))\n\n print('EER: {}'.format(eer * 100))\n print('EER diff: 
{}'.format(abs(eer - y[idx])))\n\n plt.show()\n\n\n", "sub_path": "cnn_tpe/estimator.py", "file_name": "estimator.py", "file_ext": "py", "file_size_in_byte": 2016, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.minimum", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.logical_not", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.logical_not", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.loglog", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.minimum", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "74021074", "text": "import model_repository\nimport plot_factory\nfrom config import constants\n\n\nclass Param:\n def __init__(self, history, name):\n self.history = history\n self.name = name\n\n\ndef plot_history(dir):\n history = model_repository.load_pickle(f\"{constants['results_dir']}/{dir}/history.pkl\")\n name = model_repository.load_json(f\"{constants['results_dir']}/{dir}/params.json\")['name']\n\n history.history['acc'] = history.history['acc'][0:10]\n history.history['loss'] = history.history['loss'][0:10]\n history.history['val_acc'] = 
history.history['val_acc'][0:10]\n    history.history['val_loss'] = history.history['val_loss'][0:10]\n\n    plot_factory.plot_single_history(Param(history.history, name))\n\n\ndef plot_histories(dirs):\n    params = []\n    for d in dirs:\n        history = model_repository.load_pickle(f\"{constants['results_dir']}/{d}/history.pkl\")\n        name = model_repository.load_json(f\"{constants['results_dir']}/{d}/params.json\")['name']\n        params.append(Param(history.history, name))\n    plot_factory.compare_histories(params, epochs=len(params[0].history['loss']))\n\n\n# plot_history('simple-cnn-1')\n# plot_histories(['simple-cnn-1', 'simple-cnn-2', 'simple-cnn-3'])\n# plot_histories(['simple-cnn-4-with-silence', 'simple-cnn-5-with-silence', 'simple-cnn-6-with-silence'])\n# plot_histories(['simple-cnn-10', 'simple-cnn-11', 'simple-cnn-12'])\n# plot_histories(['attention-rnn-1', 'attention-rnn-2', 'attention-rnn-3'])\nplot_history('resnet-10')\n", "sub_path": "src/reproduce_plots.py", "file_name": "reproduce_plots.py", "file_ext": "py", "file_size_in_byte": 1482, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "model_repository.load_pickle", "line_number": 13, "usage_type": "call"}, {"api_name": "config.constants", "line_number": 13, "usage_type": "name"}, {"api_name": "model_repository.load_json", "line_number": 14, "usage_type": "call"}, {"api_name": "config.constants", "line_number": 14, "usage_type": "name"}, {"api_name": "plot_factory.plot_single_history", "line_number": 21, "usage_type": "call"}, {"api_name": "model_repository.load_pickle", "line_number": 27, "usage_type": "call"}, {"api_name": "config.constants", "line_number": 27, "usage_type": "name"}, {"api_name": "model_repository.load_json", "line_number": 28, "usage_type": "call"}, {"api_name": "config.constants", "line_number": 28, "usage_type": "name"}, {"api_name": "plot_factory.compare_histories", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "350782400", "text": "import hashlib\nfrom datetime import datetime\nimport time\nimport requests\nfrom api.db import crawl_db, relation_db, app_db, AppCollections, CrawlCollections, RelationCollections, \\\n    stat_db, StatCollections, ASCENDING, DESCENDING\nfrom api.config import Conf\n\n\ndef generateMyTokenMD5(timestamp):\n    str = 'thalesky_eos_' + timestamp\n    h1 = hashlib.md5()\n    h1.update(str.encode(encoding='utf-8'))\n    return h1.hexdigest()\n\n\ndef requestMyTokenMsg(symbol):\n    \"\"\"\n    Send a request to mytoken to fetch the ICO date and price of a currency\n    :param symbol: the currency's ticker symbol\n    :return:\n    \"\"\"\n    com_id = symbol + '_cny'\n    device_model = 'MI MAX'\n    device_os = '6.0.1'\n    device_token = 'AuZOU902DNVPYWIzy6oNSRsNcUljlqC5P6MNF-5wc5YQ'\n    language = 'zh_CN'\n    legal_currency = 'CNY'\n    market_id = '1303'\n    market_name = 'cmc'\n    mytoken = 'f5c860eb5921fe098ffd224f21cf1f56'\n    platform = 'android'\n    udid = 'ffffffff-dcee-ce16-ffff-ffffaa541fb7'\n    v = '1.8.0'\n\n    timenow = time.time()\n    timestamp = (int(timenow))\n    code = generateMyTokenMD5(str(timestamp))\n\n    params = {\n        \"timestamp\": timestamp,\n        \"code\": code,\n        \"com_id\": com_id,\n        \"device_model\": device_model,\n        \"device_os\": device_os,\n        \"device_token\": device_token,\n        \"language\": language,\n        \"legal_currency\": legal_currency,\n        \"market_id\": market_id,\n        \"market_name\": market_name,\n        \"mytoken\": mytoken,\n        \"platform\": platform,\n        \"udid\": udid,\n        \"v\": v\n    }\n    URL = 'http://api.lb.mytoken.org/currency/currencydetail'\n    rsMsg = requests.get(URL, params=params).json()\n\n    return rsMsg\n\n\ndef 
getAllCurrencyList():\n page_num = 20\n total = crawl_db[CrawlCollections.COINMARKET_CURRENCIES].count()\n page = 0\n if total % page_num == 0:\n page = int(total / page_num)\n else:\n page = int(total / page_num) + 1\n count = 0\n for x in range(page):\n items = crawl_db[CrawlCollections.COINMARKET_CURRENCIES].find({}, {'data.symbol': 1}) \\\n .skip(x * page_num).limit(page_num).sort([('data.sort', 1)])\n\n for item in items:\n symbol = item['data']['symbol']\n try:\n msg = requestMyTokenMsg(symbol)\n data = msg['data']\n exchange_rate_display = ''\n ico_date_display = ''\n raised_amount_display = ''\n if 'exchange_rate_display' in data:\n exchange_rate_display = data['exchange_rate_display']\n if 'ico_date_display' in data:\n ico_date_display = data['ico_date_display']\n if 'raised_amount_display' in data:\n raised_amount_display = data['raised_amount_display']\n sdata = {\n 'symbol': symbol,\n 'exchange_rate_display': exchange_rate_display,\n 'ico_date_display': ico_date_display,\n 'raised_amount_display': raised_amount_display\n }\n # data, {'$set': {'symbol': symbol}}\n crawl_db[CrawlCollections.ICOHOLDER_MYTOKEN].update({'symbol': symbol}, sdata, upsert=True)\n time.sleep(1)\n count += 1\n print(count)\n except Exception as ex:\n print(symbol)\n print(ex)\n\n\nif __name__ == '__main__':\n getAllCurrencyList()\n", "sub_path": "api/common/crawl/mytokenrequest.py", "file_name": "mytokenrequest.py", "file_ext": "py", "file_size_in_byte": 3432, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "hashlib.md5", "line_number": 12, "usage_type": "call"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 57, "usage_type": "call"}, {"api_name": "api.db.crawl_db", "line_number": 64, "usage_type": "name"}, {"api_name": "api.db.CrawlCollections.COINMARKET_CURRENCIES", "line_number": 64, "usage_type": "attribute"}, {"api_name": "api.db.CrawlCollections", "line_number": 64, "usage_type": "name"}, {"api_name": "api.db.crawl_db", "line_number": 72, "usage_type": "name"}, {"api_name": "api.db.CrawlCollections.COINMARKET_CURRENCIES", "line_number": 72, "usage_type": "attribute"}, {"api_name": "api.db.CrawlCollections", "line_number": 72, "usage_type": "name"}, {"api_name": "api.db.crawl_db", "line_number": 96, "usage_type": "name"}, {"api_name": "api.db.CrawlCollections.ICOHOLDER_MYTOKEN", "line_number": 96, "usage_type": "attribute"}, {"api_name": "api.db.CrawlCollections", "line_number": 96, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "156067762", "text": "from django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\n\nfrom core.views import CachedObjectMixin, ObjectSessionMixin\nfrom ..conf import settings\nfrom ..models import VocabEntry, VocabProject, VocabSource\n\n\nclass VocabSessionMixin(ObjectSessionMixin):\n\n def setupSession(self, request, *args, **kwargs):\n if \"vocab_entry\" in request.session:\n del request.session[\"vocab_entry\"]\n if \"vocab_source\" in request.session:\n del request.session[\"vocab_source\"]\n if \"vocab_project\" in request.session:\n del request.session[\"vocab_project\"]\n\n\nclass PermissionMixin(object):\n\n def dispatch(self, request, *args, **kwargs):\n self.check_permission()\n return super(PermissionMixin, self).dispatch(request, *args, **kwargs)\n\n def check_permission(self, *args, 
**kwargs):\n raise NotImplementedError(\"Method check_permission needs to be implemented.\")\n\n\nclass VocabProjectSessionMixin(ObjectSessionMixin):\n session_obj = \"vocab_project\"\n session_obj_attrs = [\"id\", \"name\", \"slug\"]\n\n def setupSession(self, request, *args, **kwargs):\n super(VocabProjectSessionMixin, self).setupSession(request, *args, **kwargs)\n if \"vocab_entry\" in request.session:\n del request.session[\"vocab_entry\"]\n if \"vocab_source\" in request.session:\n del request.session[\"vocab_source\"]\n\n\nclass VocabProjectMixin(\n CachedObjectMixin, VocabProjectSessionMixin,\n):\n vocab_project_id = \"vocab_project_pk\"\n vocab_project_slug = \"vocab_project_slug\"\n vocab_project = None\n\n def dispatch(self, request, *args, **kwargs):\n self.get_vocab_project(request, *args, **kwargs)\n return super(VocabProjectMixin, self).dispatch(request, *args, **kwargs)\n\n def get_vocab_project(self, request, *args, **kwargs):\n if self.vocab_project_id in kwargs:\n self.vocab_project = get_object_or_404(\n VocabProject.objects.prefetch_related(\"owner\"),\n id=kwargs[self.vocab_project_id]\n )\n elif self.vocab_project_slug in kwargs:\n self.vocab_project = get_object_or_404(\n VocabProject.objects.prefetch_related(\"owner\"),\n slug=kwargs[self.vocab_project_slug]\n )\n else:\n obj = self.get_object()\n if hasattr(obj, \"vocab_project_id\"):\n self.vocab_project = obj.vocab_project\n elif isinstance(obj, VocabProject):\n self.vocab_project = obj\n else:\n raise Http404(\"Vocab project not found.\")\n\n def get_context_data(self, **kwargs):\n context = super(VocabProjectMixin, self).get_context_data(**kwargs)\n context[\"vocab_project\"] = self.vocab_project\n return context\n\n\nclass VocabSourcePermissionMixin(PermissionMixin):\n is_vocab_source_creator = False\n\n def get_context_data(self, **kwargs):\n context = super(VocabSourcePermissionMixin, self).get_context_data(**kwargs)\n context[\"is_vocab_source_creator\"] = self.is_vocab_source_creator\n return context\n\n def check_permission(self):\n if self.vocab_source.creator_id == self.request.user.id:\n self.is_vocab_source_creator = True\n else:\n raise PermissionDenied\n\n\nclass VocabSourceSessionMixin(ObjectSessionMixin):\n session_obj = \"vocab_source\"\n session_obj_attrs = [\"id\", \"name\", \"slug\"]\n\n def setupSession(self, request, *args, **kwargs):\n super(VocabSourceSessionMixin, self).setupSession(request, *args, **kwargs)\n if \"vocab_entry\" in request.session:\n del request.session[\"vocab_entry\"]\n if \"vocab_project\" in request.session:\n del request.session[\"vocab_project\"]\n\n\nclass VocabSourceMixin(\n CachedObjectMixin, VocabSourceSessionMixin,\n VocabSourcePermissionMixin\n):\n vocab_source_id = \"vocab_source_pk\"\n vocab_source_slug = \"vocab_source_slug\"\n vocab_project = None\n vocab_source = None\n\n def dispatch(self, request, *args, **kwargs):\n self.get_vocab_source(request, *args, **kwargs)\n return super(VocabSourceMixin, self).dispatch(request, *args, **kwargs)\n\n def get_vocab_source(self, request, *args, **kwargs):\n if self.vocab_source_id in kwargs:\n self.vocab_source = get_object_or_404(\n VocabSource.objects.prefetch_related(\"creator\", \"vocab_project\"),\n id=kwargs[self.vocab_source_id]\n )\n elif self.vocab_source_slug in kwargs:\n self.vocab_source = get_object_or_404(\n VocabSource.objects.prefetch_related(\"creator\", \"vocab_project\"),\n slug=kwargs[self.vocab_source_slug]\n )\n else:\n obj = self.get_object()\n if hasattr(obj, \"vocab_source_id\"):\n 
self.vocab_source = obj.vocab_source\n elif isinstance(obj, VocabSource):\n self.vocab_source = obj\n else:\n raise Http404(\"Vocab source not found.\")\n self.vocab_project = self.vocab_source.vocab_project\n\n def get_context_data(self, **kwargs):\n context = super(VocabSourceMixin, self).get_context_data(**kwargs)\n context[\"vocab_project\"] = self.vocab_project\n context[\"vocab_source\"] = self.vocab_source\n return context\n\n\nclass VocabSourceSearchMixin(object):\n search_term = None\n vocab_source = None\n\n def dispatch(self, request, *args, **kwargs):\n self.search_term = self.request.GET.get(\"source\", None)\n if self.search_term:\n try:\n self.vocab_source = VocabSource.objects.select_related(\"creator\").get(\n **self.get_search_query_kwargs()\n )\n return self.search_success(**kwargs)\n except VocabSource.DoesNotExist:\n pass\n return super(VocabSourceSearchMixin, self).dispatch(request, *args, **kwargs)\n\n def get_search_query_kwargs(self):\n return {\n \"name__iexact\": self.search_term\n }\n\n def search_success(self, **kwargs):\n return redirect(\n \"vocab:vocab_source_dashboard\",\n vocab_source_pk=self.vocab_source.id,\n vocab_source_slug=self.vocab_source.slug\n )\n\n def get_context_data(self, **kwargs):\n context = super(VocabSourceSearchMixin, self).get_context_data(**kwargs)\n context[\"vocab_source\"] = self.vocab_source\n context[\"search_term\"] = self.search_term\n return context\n\n\nclass VocabSourceSearchAuthMixin(VocabSourceSearchMixin):\n\n def get_search_query_kwargs(self):\n return {\n \"creator_id\": self.request.user.id,\n \"name__iexact\": self.search_term,\n }\n\n\nclass VocabEntryPermissionMixin(PermissionMixin):\n\n def check_permission(self):\n if not self.request.user.is_superuser:\n raise PermissionDenied\n\n\nclass VocabEntrySessionMixin(ObjectSessionMixin):\n session_obj = \"vocab_entry\"\n session_obj_attrs = [\"id\", \"language\", \"entry\", \"slug\"]\n\n def setupSession(self, request, *args, **kwargs):\n super(VocabEntrySessionMixin, self).setupSession(request, *args, **kwargs)\n if \"vocab_source\" in request.session:\n del request.session[\"vocab_source\"]\n if \"vocab_project\" in request.session:\n del request.session[\"vocab_project\"]\n\n\nclass VocabEntryMixin(\n CachedObjectMixin, VocabEntrySessionMixin,\n VocabEntryPermissionMixin\n):\n vocab_entry_id = \"vocab_entry_pk\"\n vocab_entry_language = \"vocab_entry_language\"\n vocab_entry_slug = \"vocab_entry_slug\"\n\n def dispatch(self, request, *args, **kwargs):\n if self.vocab_entry_id in kwargs:\n self.vocab_entry = get_object_or_404(\n VocabEntry,\n id=kwargs[self.vocab_entry_id]\n )\n if self.vocab_entry_language in kwargs and self.vocab_entry_slug in kwargs:\n self.vocab_entry = get_object_or_404(\n VocabEntry,\n language=kwargs[self.vocab_entry_language],\n slug=kwargs[self.vocab_entry_slug]\n )\n else:\n obj = self.get_object()\n if hasattr(obj, \"vocab_entry_id\"):\n self.vocab_entry = obj.vocab_entry\n else:\n self.vocab_entry = obj\n return super(VocabEntryMixin, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(VocabEntryMixin, self).get_context_data(**kwargs)\n context[\"vocab_entry\"] = self.vocab_entry\n return context\n\n\nclass VocabEntrySearchMixin(object):\n search_term = None\n search_language = \"en\"\n vocab_entry = None\n\n def dispatch(self, request, *args, **kwargs):\n self.search_term = self.request.GET.get(\"search_entry\", None)\n self.search_language = self.request.GET.get(\"search_language\", 
\"en\")\n if self.search_language not in settings.LANGUAGES_DICT:\n self.search_language = \"en\"\n if self.search_term and self.search_language:\n try:\n self.vocab_entry = VocabEntry.objects.get(\n **self.get_search_query_kwargs()\n )\n return self.search_success(**kwargs)\n except VocabEntry.DoesNotExist:\n pass\n return super(VocabEntrySearchMixin, self).dispatch(request, *args, **kwargs)\n\n def get_search_query_kwargs(self):\n return {\n \"entry__iexact\": self.search_term,\n \"language\": self.search_language\n }\n\n def search_success(self, **kwargs):\n return redirect(\n \"vocab:vocab_entry_dashboard\",\n vocab_entry_language=self.vocab_entry.language,\n vocab_entry_slug=self.vocab_entry.slug\n )\n\n def get_context_data(self, **kwargs):\n context = super(VocabEntrySearchMixin, self).get_context_data(**kwargs)\n context[\"vocab_entry\"] = self.vocab_entry\n context[\"search_term\"] = self.search_term\n context[\"search_language\"] = self.search_language\n return context\n", "sub_path": "vocab/views/views_mixins.py", "file_name": "views_mixins.py", "file_ext": "py", "file_size_in_byte": 10235, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "core.views.ObjectSessionMixin", "line_number": 10, "usage_type": "name"}, {"api_name": "core.views.ObjectSessionMixin", "line_number": 31, "usage_type": "name"}, {"api_name": "core.views.CachedObjectMixin", "line_number": 44, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 56, "usage_type": "call"}, {"api_name": "models.VocabProject.objects.prefetch_related", "line_number": 57, "usage_type": "call"}, {"api_name": "models.VocabProject.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.VocabProject", "line_number": 57, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 61, "usage_type": "call"}, {"api_name": "models.VocabProject.objects.prefetch_related", "line_number": 62, "usage_type": "call"}, {"api_name": "models.VocabProject.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models.VocabProject", "line_number": 62, "usage_type": "name"}, {"api_name": "models.VocabProject", "line_number": 69, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 72, "usage_type": "call"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 92, "usage_type": "name"}, {"api_name": "core.views.ObjectSessionMixin", "line_number": 95, "usage_type": "name"}, {"api_name": "core.views.CachedObjectMixin", "line_number": 108, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 122, "usage_type": "call"}, {"api_name": "models.VocabSource.objects.prefetch_related", "line_number": 123, "usage_type": "call"}, {"api_name": "models.VocabSource.objects", "line_number": 123, "usage_type": "attribute"}, {"api_name": "models.VocabSource", "line_number": 123, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 127, "usage_type": "call"}, {"api_name": "models.VocabSource.objects.prefetch_related", "line_number": 128, "usage_type": "call"}, {"api_name": "models.VocabSource.objects", "line_number": 128, "usage_type": "attribute"}, {"api_name": "models.VocabSource", "line_number": 128, "usage_type": "name"}, {"api_name": "models.VocabSource", "line_number": 135, "usage_type": "argument"}, {"api_name": "django.http.Http404", "line_number": 138, "usage_type": "call"}, 
{"api_name": "models.VocabSource.objects.select_related", "line_number": 156, "usage_type": "call"}, {"api_name": "models.VocabSource.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": "models.VocabSource", "line_number": 156, "usage_type": "name"}, {"api_name": "models.VocabSource.DoesNotExist", "line_number": 160, "usage_type": "attribute"}, {"api_name": "models.VocabSource", "line_number": 160, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 170, "usage_type": "call"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 196, "usage_type": "name"}, {"api_name": "core.views.ObjectSessionMixin", "line_number": 199, "usage_type": "name"}, {"api_name": "core.views.CachedObjectMixin", "line_number": 212, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 221, "usage_type": "call"}, {"api_name": "models.VocabEntry", "line_number": 222, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 226, "usage_type": "call"}, {"api_name": "models.VocabEntry", "line_number": 227, "usage_type": "argument"}, {"api_name": "conf.settings.LANGUAGES_DICT", "line_number": 253, "usage_type": "attribute"}, {"api_name": "conf.settings", "line_number": 253, "usage_type": "name"}, {"api_name": "models.VocabEntry.objects.get", "line_number": 257, "usage_type": "call"}, {"api_name": "models.VocabEntry.objects", "line_number": 257, "usage_type": "attribute"}, {"api_name": "models.VocabEntry", "line_number": 257, "usage_type": "name"}, {"api_name": "models.VocabEntry.DoesNotExist", "line_number": 261, "usage_type": "attribute"}, {"api_name": "models.VocabEntry", "line_number": 261, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 272, "usage_type": "call"}]} +{"seq_id": "107570565", "text": "################################################################################\n# reordering and cleaning SSC_test for the upload on github\n# testing the SSC_class\n# 19 May 2017\n# author: Cosimo Nigro (cosimonigro2@gmail.com)\n################################################################################\n\nimport numpy as np\nfrom math import pi\nimport matplotlib.pyplot as plt\nfrom ssc_model import model, numerics, constants\nimport astropy.units as u\nimport timeit\nfrom astropy.table import Table\nimport matplotlib.pylab as pylab\n\ntime_grid = dict(time_min = 0, time_max = 1, time_bins =10 )\ngamma_grid = dict(gamma_min =1, gamma_max = 1e5, gamma_bins =20)\nemission_region = dict(R = 1e16, B = 1, t_esc = 1.5, gamma = 10, theta = 0, z = 0)\ninjected_spectrum = dict(type = 'power-law', norm = 4e0, alpha=-1.7, t_inj = 1)\n\nstart = timeit.default_timer()\n# let us initialize the ssc object\nSSC = model(time_grid, gamma_grid, emission_region, injected_spectrum)\ndist = SSC.distance\nnum = numerics(SSC)\n# let us evolve it\nN_e = num.evolve()\n\n# Saving Results\nResults_Particles = dict(cooling = num.cooling, U_rad=num.U_rad_grid, N_e_grid = num.N_e_grid, time_grid = time_grid, gamma_grid_dict = gamma_grid,\n gamma_grid= SSC.gamma_grid, emission_region=emission_region, injected_spectrum=injected_spectrum)\nnp.save('./Results/Results_Particles_test_x.npy', Results_Particles)\n\n\n# plotting section, optional\nplotting=True\n\nif plotting==False:\n pass\nelse:\n params = {'legend.fontsize': 'large',\n 'figure.figsize': (10, 10),\n 'figure.subplot.bottom': 0.2,\n 'axes.labelsize': 'xx-large',\n 'axes.titlesize':'x-large',\n 
'xtick.labelsize':'xx-large',\n 'ytick.labelsize':'xx-large'}\n pylab.rcParams.update(params)\n\n fig, axes = plt.subplots(2, 1)\n fig.subplots_adjust(hspace = 0.4)\n font = {'family': 'serif', 'color':'black', 'weight': 'normal', 'size': 16.} # font definitions\n\n # first plot with final electron spectrum\n axes[0].plot(SSC.gamma_grid, N_e)\n axes[0].legend(loc = 0, numpoints = 1., prop = {'size':12.})\n axes[0].set_xlabel(r'$\\gamma$')\n axes[0].set_ylabel(r'$N_{e}$')\n #axes[0].set_xlim(0, 5)\n #axes[0].set_ylim(-8, -4)\n axes[0].set_xscale('log')\n axes[0].set_yscale('log')\n\n # calculate the final SED\n energy = np.logspace(-13, 15, 200) * u.eV\n frequency = energy * (1.6022e-19) / (6.6261e-34) # *5e-5\n k_table = num.absorption(N_e, U_rad=False)\n k = k_table(energy)\n absorption_factor = num.absorption_factor(k)\n Syn = num.synchrotron(N_e)\n IC = num.inverse_compton(N_e, Syn, absorption_factor)\n volume = 4 / 3 * pi * SSC.R ** 3\n\n transversion = 1 / (4 * np.pi * volume * energy.to('erg'))\n SED_SYN = Syn.sed(energy, distance=0)*absorption_factor/SSC.R\n SED_IC = IC.sed(energy, distance=0)\n SED = SED_SYN + SED_IC\n\n\n boosted_energy, boosted_SED = num.doppler(energy, SED)\n final_SED = num.ebl(boosted_energy, boosted_SED)\n\n stop = timeit.default_timer()\n print('Computational time: ')\n print(stop - start, ' s')\n\n axes[1].plot(boosted_energy, final_SED)\n axes[1].legend(loc = 0, numpoints = 1.)\n axes[1].set_xlabel('E [eV]')\n axes[1].set_ylabel(r'E$^2$ dF/dE')\n #axes[1].set_ylim(-20, 7)\n #axes[1].set_xlim(-3, 14)\n axes[1].set_xscale('log')\n axes[1].set_yscale('log')\n\n plt.show()\n", "sub_path": "SSC_test.py", "file_name": "SSC_test.py", "file_ext": "py", "file_size_in_byte": 3417, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "timeit.default_timer", "line_number": 22, "usage_type": "call"}, {"api_name": "ssc_model.model", "line_number": 24, "usage_type": "call"}, {"api_name": "ssc_model.numerics", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pylab.rcParams.update", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pylab.rcParams", "line_number": 49, "usage_type": "attribute"}, {"api_name": "matplotlib.pylab", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.logspace", "line_number": 66, "usage_type": "call"}, {"api_name": "astropy.units.eV", "line_number": 66, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 66, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 75, "usage_type": "attribute"}, {"api_name": "timeit.default_timer", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}]} +{"seq_id": "254494272", "text": "from functools import cmp_to_key\ndef custom_cmp(a,b):\n if a+b>b+a:\n return -1\n elif a+b 1:\n non_one_cnt += 1\n if non_one_cnt > 1:\n raise Exception(\"expecting model output to be a vector\")\n\n # Model input must have 3 dims, either CHW or HWC\n if len(input_metadata.shape) != 3:\n raise Exception(\n \"expecting input to have 3 dimensions, model '{}' 
input has {}\".\n format(model_name, len(input_metadata.shape)))\n\n if ((input_config.format != mc.ModelInput.FORMAT_NCHW) and\n (input_config.format != mc.ModelInput.FORMAT_NHWC)):\n raise Exception(\"unexpected input format \" +\n mc.ModelInput.Format.Name(input_config.format) +\n \", expecting \" +\n mc.ModelInput.Format.Name(mc.ModelInput.FORMAT_NCHW) +\n \" or \" +\n mc.ModelInput.Format.Name(mc.ModelInput.FORMAT_NHWC))\n\n if input_config.format == mc.ModelInput.FORMAT_NHWC:\n h = input_metadata.shape[0]\n w = input_metadata.shape[1]\n c = input_metadata.shape[2]\n else:\n c = input_metadata.shape[0]\n h = input_metadata.shape[1]\n w = input_metadata.shape[2]\n\n return (model_config.max_batch_size,\n input_metadata.name, output_metadata.name, c, h, w,\n input_config.format, input_metadata.datatype)\n\n\ndef parse_model_http(model_metadata, model_config):\n \"\"\"\n Check the configuration of a model to make sure it meets the\n requirements for an image classification network (as expected by\n this client)\n \"\"\"\n if len(model_metadata['inputs']) != 1:\n raise Exception(\"expecting 1 input, got {}\".format(\n len(model_metadata['inputs'])))\n if len(model_metadata['outputs']) != 1:\n raise Exception(\"expecting 1 output, got {}\".format(\n len(model_metadata['outputs'])))\n\n if len(model_config['input']) != 1:\n raise Exception(\n \"expecting 1 input in model configuration, got {}\".format(\n len(model_config['input'])))\n\n input_metadata = model_metadata['inputs'][0]\n input_config = model_config['input'][0]\n output_metadata = model_metadata['outputs'][0]\n\n if output_metadata['datatype'] != \"FP32\":\n raise Exception(\"expecting output datatype to be FP32, model '\" +\n model_metadata['name'] + \"' output type is \" +\n output_metadata['datatype'])\n\n # Output is expected to be a vector. But allow any number of\n # dimensions as long as all but 1 is size 1 (e.g. 
{ 10 }, { 1, 10\n # }, { 10, 1, 1 } are all ok).\n non_one_cnt = 0\n for dim in output_metadata['shape']:\n if dim > 1:\n non_one_cnt += 1\n if non_one_cnt > 1:\n raise Exception(\"expecting model output to be a vector\")\n\n # Model input must have 3 dims, either CHW or HWC\n if len(input_metadata['shape']) != 3:\n raise Exception(\n \"expecting input to have 3 dimensions, model '{}' input has {}\".\n format(model_metadata.name, len(input_metadata['shape'])))\n\n if ((input_config['format'] != \"FORMAT_NCHW\") and\n (input_config['format'] != \"FORMAT_NHWC\")):\n raise Exception(\"unexpected input format \" + input_config['format'] +\n \", expecting FORMAT_NCHW or FORMAT_NHWC\")\n\n if input_config['format'] == \"FORMAT_NHWC\":\n h = input_metadata['shape'][0]\n w = input_metadata['shape'][1]\n c = input_metadata['shape'][2]\n else:\n c = input_metadata['shape'][0]\n h = input_metadata['shape'][1]\n w = input_metadata['shape'][2]\n\n max_batch_size = 0\n if 'max_batch_size' in model_config:\n max_batch_size = model_config['max_batch_size']\n\n return (max_batch_size,\n input_metadata['name'], output_metadata['name'], c, h, w,\n input_config['format'], input_metadata['datatype'])\n\n\ndef preprocess(img, format, dtype, c, h, w, scaling):\n \"\"\"\n Pre-process an image to meet the size, type and format\n requirements specified by the parameters.\n \"\"\"\n # np.set_printoptions(threshold='nan')\n\n if c == 1:\n sample_img = img.convert('L')\n else:\n sample_img = img.convert('RGB')\n\n resized_img = sample_img.resize((w, h), Image.BILINEAR)\n resized = np.array(resized_img)\n if resized.ndim == 2:\n resized = resized[:, :, np.newaxis]\n\n npdtype = triton_to_np_dtype(dtype)\n typed = resized.astype(npdtype)\n\n if scaling == 'INCEPTION':\n scaled = (typed / 128) - 1\n elif scaling == 'VGG':\n if c == 1:\n scaled = typed - np.asarray((128,), dtype=npdtype)\n else:\n scaled = typed - np.asarray((123, 117, 104), dtype=npdtype)\n else:\n scaled = typed\n\n # Swap to CHW if necessary\n if FLAGS.protocol.lower() == \"grpc\":\n if format == mc.ModelInput.FORMAT_NCHW:\n ordered = np.transpose(scaled, (2, 0, 1))\n else:\n ordered = scaled\n else:\n if format == \"FORMAT_NCHW\":\n ordered = np.transpose(scaled, (2, 0, 1))\n else:\n ordered = scaled\n\n # Channels are in RGB order. 
Currently model configuration data\n # doesn't provide any information as to other channel orderings\n # (like BGR) so we just assume RGB.\n return ordered\n\n\ndef postprocess(results, output_name, batch_size):\n \"\"\"\n Post-process results to show classifications.\n \"\"\"\n\n output_array = results.as_numpy(output_name)\n if len(output_array) != batch_size:\n raise Exception(\"expected {} results, got {}\".format(\n batch_size, len(output_array)))\n\n for results in output_array:\n for result in results:\n if output_array.dtype.type == np.bytes_:\n cls = \"\".join(chr(x) for x in result).split(':')\n else:\n cls = result.split(':')\n print(\" {} ({}) = {}\".format(cls[0], cls[1], cls[2]))\n\n\ndef requestGenerator(supports_batching, input_name, output_name, c, h, w, format, dtype, FLAGS):\n # Preprocess image into input data according to model requirements\n image_data = None\n with Image.open(FLAGS.image_filename) as img:\n image_data = preprocess(img, format, dtype, c, h, w, FLAGS.scaling)\n\n if not supports_batching:\n repeated_image_data = image_data\n else:\n repeated_image_data = [image_data for _ in range(FLAGS.batch_size)]\n\n batched_image_data = np.stack(repeated_image_data, axis=0)\n\n # Set the input data\n inputs = []\n if FLAGS.protocol.lower() == \"grpc\":\n inputs.append(\n tritongrpcclient.InferInput(input_name, batched_image_data.shape, dtype))\n inputs[0].set_data_from_numpy(batched_image_data)\n else:\n inputs.append(\n tritonhttpclient.InferInput(input_name, batched_image_data.shape, dtype))\n inputs[0].set_data_from_numpy(batched_image_data, binary_data=False)\n\n outputs = []\n if FLAGS.protocol.lower() == \"grpc\":\n outputs.append(\n tritongrpcclient.InferRequestedOutput(output_name,\n class_count=FLAGS.classes))\n else:\n outputs.append(\n tritonhttpclient.InferRequestedOutput(output_name,\n binary_data=False,\n class_count=FLAGS.classes))\n\n yield inputs, outputs, FLAGS.model_name, FLAGS.model_version\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-v',\n '--verbose',\n action=\"store_true\",\n required=False,\n default=False,\n help='Enable verbose output')\n parser.add_argument('-m',\n '--model-name',\n type=str,\n required=True,\n help='Name of model')\n parser.add_argument(\n '-x',\n '--model-version',\n type=str,\n required=False,\n default=\"\",\n help='Version of model. Default is to use latest version.')\n parser.add_argument('-b',\n '--batch-size',\n type=int,\n required=False,\n default=1,\n help='Batch size. Default is 1.')\n parser.add_argument('-c',\n '--classes',\n type=int,\n required=False,\n default=1,\n help='Number of class results to report. Default is 1.')\n parser.add_argument(\n '-s',\n '--scaling',\n type=str,\n choices=['NONE', 'INCEPTION', 'VGG'],\n required=False,\n default='NONE',\n help='Type of scaling to apply to image pixels. Default is NONE.')\n parser.add_argument('-u',\n '--url',\n type=str,\n required=False,\n default='localhost:8000',\n help='Inference server URL. Default is localhost:8000.')\n parser.add_argument('-i',\n '--protocol',\n type=str,\n required=False,\n default='HTTP',\n help='Protocol (HTTP/gRPC) used to communicate with ' +\n 'the inference service. 
Default is HTTP.')\n parser.add_argument('image_filename', type=str, help='Input image.')\n FLAGS = parser.parse_args()\n\n try:\n if FLAGS.protocol.lower() == \"grpc\":\n # Create gRPC client for communicating with the server\n triton_client = tritongrpcclient.InferenceServerClient(\n url=FLAGS.url, verbose=FLAGS.verbose)\n else:\n # Create HTTP client for communicating with the server\n triton_client = tritonhttpclient.InferenceServerClient(\n url=FLAGS.url, verbose=FLAGS.verbose)\n except Exception as e:\n print(\"context creation failed: \" + str(e))\n sys.exit()\n\n # Make sure the model matches our requirements, and get some\n # properties of the model that we need for preprocessing\n try:\n model_metadata = triton_client.get_model_metadata(\n model_name=FLAGS.model_name, model_version=FLAGS.model_version)\n except InferenceServerException as e:\n print(\"failed to retrieve the metadata: \" + str(e))\n sys.exit()\n\n try:\n model_config = triton_client.get_model_config(\n model_name=FLAGS.model_name, model_version=FLAGS.model_version)\n except InferenceServerException as e:\n print(\"failed to retrieve the config: \" + str(e))\n sys.exit()\n\n if FLAGS.protocol.lower() == \"grpc\":\n max_batch_size, input_name, output_name, c, h, w, format, dtype = parse_model_grpc(\n model_metadata, model_config.config)\n else:\n max_batch_size, input_name, output_name, c, h, w, format, dtype = parse_model_http(\n model_metadata, model_config)\n\n # Send requests of FLAGS.batch_size images. If the number of\n # images isn't an exact multiple of FLAGS.batch_size then just\n # start over with the first images until the batch is filled.\n requests = []\n responses = []\n\n # Send request\n try:\n for inputs, outputs, model_name, model_version in requestGenerator(\n max_batch_size > 0, input_name, output_name, c, h, w, format, dtype, FLAGS):\n responses.append(\n triton_client.infer(FLAGS.model_name,\n inputs,\n model_version=FLAGS.model_version,\n outputs=outputs))\n\n except InferenceServerException as e:\n print(\"inference failed: \" + str(e))\n sys.exit()\n\n for response in responses:\n postprocess(response, output_name, FLAGS.batch_size)\n\n print(\"PASS\")\n", "sub_path": "src/clients/python/experimental_api_v2/examples/v2_image_client.py", "file_name": "v2_image_client.py", "file_ext": "py", "file_size_in_byte": 15164, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "tritongrpcclient.model_config_pb2.ModelInput", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tritongrpcclient.model_config_pb2", "line_number": 85, "usage_type": "name"}, {"api_name": "tritongrpcclient.model_config_pb2.ModelInput", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tritongrpcclient.model_config_pb2", "line_number": 86, "usage_type": "name"}, {"api_name": "tritongrpcclient.model_config_pb2.ModelInput.Format.Name", "line_number": 88, "usage_type": "call"}, {"api_name": "tritongrpcclient.model_config_pb2.ModelInput", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tritongrpcclient.model_config_pb2", "line_number": 88, "usage_type": "name"}, {"api_name": "tritongrpcclient.model_config_pb2.ModelInput.Format.Name", "line_number": 90, "usage_type": "call"}, {"api_name": "tritongrpcclient.model_config_pb2.ModelInput", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tritongrpcclient.model_config_pb2", "line_number": 90, "usage_type": "name"}, {"api_name": 
"tritongrpcclient.model_config_pb2.ModelInput.Format.Name", "line_number": 92, "usage_type": "call"}, {"api_name": "tritongrpcclient.model_config_pb2.ModelInput", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tritongrpcclient.model_config_pb2", "line_number": 92, "usage_type": "name"}, {"api_name": "tritongrpcclient.model_config_pb2.ModelInput", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tritongrpcclient.model_config_pb2", "line_number": 94, "usage_type": "name"}, {"api_name": "PIL.Image.BILINEAR", "line_number": 186, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 186, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 189, "usage_type": "attribute"}, {"api_name": "tritonclientutils.utils.triton_to_np_dtype", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 200, "usage_type": "call"}, {"api_name": "tritongrpcclient.model_config_pb2.ModelInput", "line_number": 206, "usage_type": "attribute"}, {"api_name": "tritongrpcclient.model_config_pb2", "line_number": 206, "usage_type": "name"}, {"api_name": "numpy.transpose", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.bytes_", "line_number": 234, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 244, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 244, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 252, "usage_type": "call"}, {"api_name": "tritongrpcclient.InferInput", "line_number": 258, "usage_type": "call"}, {"api_name": "tritonhttpclient.InferInput", "line_number": 262, "usage_type": "call"}, {"api_name": "tritongrpcclient.InferRequestedOutput", "line_number": 268, "usage_type": "call"}, {"api_name": "tritonhttpclient.InferRequestedOutput", "line_number": 272, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 280, "usage_type": "call"}, {"api_name": "tritongrpcclient.InferenceServerClient", "line_number": 338, "usage_type": "call"}, {"api_name": "tritonhttpclient.InferenceServerClient", "line_number": 342, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 346, "usage_type": "call"}, {"api_name": "tritonclientutils.utils.InferenceServerException", "line_number": 353, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 355, "usage_type": "call"}, {"api_name": "tritonclientutils.utils.InferenceServerException", "line_number": 360, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 362, "usage_type": "call"}, {"api_name": "tritonclientutils.utils.InferenceServerException", "line_number": 387, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 389, "usage_type": "call"}]} +{"seq_id": "64742459", "text": "# \r\n# Begins web crawler process and saves data into csv file\r\n# \r\n# loads data and indexes it for later use in search engine\r\n#\r\n\r\nimport os\r\nimport math\r\nimport json\r\nimport pickle\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize \r\nfrom django_pandas.io import read_frame\r\nfrom django.apps import AppConfig\r\n\r\nclass indexConfig(AppConfig):\r\n name = 'searchApp'\r\n\r\n #loads and formats dataset\r\n def load_dataset(self):\r\n #importing from inside the function b/c I don't 
know why but it works\r\n from searchApp.models import productItem\r\n \r\n productModels = productItem.objects.all()\r\n dataframe = read_frame(productModels)\r\n return dataframe\r\n\r\n def index_data(self, dataframe):\r\n stop_words = set(stopwords.words('english')) \r\n\r\n tf_data = {} #term frequency (freq of term from doc)\r\n df_data = {} #document frequency (freq of term from all docs)\r\n idf_data = {} #inverse dense frequency (value you get from calculating DF)\r\n\r\n\r\n #iterates through the dataframe and counts tf for each product\r\n for indx in dataframe.index:\r\n tf = {}\r\n data = dataframe['product_name'][indx]\r\n word_list = word_tokenize(data)\r\n\r\n for word in word_list:\r\n if word in stop_words:\r\n continue\r\n \r\n #calculates term freq for the word\r\n if word in tf:\r\n tf[word] += 1\r\n else:\r\n tf[word] = 1\r\n\r\n #calculates the doc freq of the word\r\n if word in df_data :\r\n df_data[word] += 1\r\n else :\r\n df_data[word] = 1\r\n\r\n tf_data[dataframe['product_url'][indx]] = tf\r\n\r\n #calculate idf\r\n for x in df_data:\r\n idf_data[x] = 1 + math.log10(len(tf_data)/df_data[x])\r\n\r\n tf_idf = {}\r\n\r\n for word in df_data:\r\n list_doc = []\r\n\r\n for indx in dataframe.index:\r\n tf_value = 0\r\n\r\n if word in tf_data[dataframe['product_url'][indx]]:\r\n tf_value = tf_data[dataframe['product_url'][indx]][word]\r\n\r\n weight = tf_value * idf_data[word]\r\n doc = {\r\n \"product_url\": dataframe['product_url'][indx],\r\n \"product_name\": dataframe['product_name'][indx],\r\n \"image_url\": dataframe['image_url'][indx],\r\n \"shop_name\": dataframe['shop_name'][indx],\r\n \"price\": dataframe['price'][indx],\r\n \"score\": weight\r\n }\r\n\r\n if doc['score'] != 0:\r\n if doc not in list_doc:\r\n list_doc.append(doc)\r\n\r\n tf_idf[word] = list_doc\r\n\r\n return tf_idf\r\n\r\n # store tf_idf into the django database\r\n def dict_to_model(self, tf_idf):\r\n from searchApp.models import tfData\r\n\r\n #empties table if alrdy exists\r\n tfData.objects.all().delete()\r\n for key, value in tf_idf.items():\r\n data = tfData.create(key, json.dumps(value))\r\n data.save()\r\n\r\n # where start up code goes\r\n def ready(self):\r\n tf_idf = self.index_data(self.load_dataset())\r\n self.dict_to_model(tf_idf)\r\n pass\r\n", "sub_path": "searchApp/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 3462, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.apps.AppConfig", "line_number": 18, "usage_type": "name"}, {"api_name": "searchApp.models.productItem.objects.all", "line_number": 26, "usage_type": "call"}, {"api_name": "searchApp.models.productItem.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "searchApp.models.productItem", "line_number": 26, "usage_type": "name"}, {"api_name": "django_pandas.io.read_frame", "line_number": 27, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 31, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 31, "usage_type": "name"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 42, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 64, "usage_type": "call"}, {"api_name": "searchApp.models.tfData.objects.all", "line_number": 100, "usage_type": "call"}, {"api_name": "searchApp.models.tfData.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "searchApp.models.tfData", "line_number": 100, "usage_type": 
"name"}, {"api_name": "searchApp.models.tfData.create", "line_number": 102, "usage_type": "call"}, {"api_name": "searchApp.models.tfData", "line_number": 102, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "519759514", "text": "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\n# np.save('./data/samsung.npy', arr=samsung)\n# 1. samsung numpy 파일 불러오기 (allow_pickle=True)\nsamsung = np.load('./data/samsung.npy', allow_pickle=True)\n# print(samsung)\n# print(samsung.shape) #(620, 5)\n'''\n[[50200 50400 49100 49200 18709146]\n [49200 50200 49150 49850 15918683]\n [50300 50500 49400 49400 10365440]\n ...\n [67000 67000 65600 65700 30204089]\n [65700 66200 64700 64800 22963790]\n [64100 64800 63900 64600 16590290]]\n'''\n\n# 2. x축, y축 자르기\n\n'''\ndef split_x(seq, size):\n aaa = [] # 임시 리스트\n # i는 0부터 seq사이즈-size까지 반복 \n # (그래야 size만큼씩 온전히 자를 수 있다)\n for i in range(len(seq) -size +1 ):\n subset = seq[i:(i+size)] # subset은 i부터 size만큼 배열 저장\n aaa.append([subset]) # 배열에 subset을 붙인다\n print(type(aaa)) # aaa의 타입은 리스트\n return np.array(aaa) # 리스트를 어레이로 바꿔서 반환하자\n'''\n\n\ndef split_xy(dataset, time_steps, y_column):\n x, y = list(), list()\n for i in range(len(dataset)):\n x_end_number = i + time_steps\n y_end_number = x_end_number + y_column\n\n if y_end_number > len(dataset):\n break\n tem_x = dataset[i:x_end_number, :]\n tem_y = dataset[x_end_number:y_end_number, 3]\n\n x.append(tem_x)\n y.append(tem_y)\n return np.array(x), np.array(y)\n\nx, y = split_xy(samsung, 5, 1)\nprint(samsung)\nprint(x, \"\\n\", y)\n\n# 3.데이터 전처리 \nprint(x.shape)\nprint(y.shape)\n'''\n(615, 5, 5)\n(615, 1)\n'''\n\n# train_test_split\nx_train, x_test, y_train, y_test = train_test_split(\n x, y, shuffle = True, train_size=0.7)\n\nprint(x_train.shape) #(430, 5, 5)\nprint(x_test.shape) #(185, 5, 5)\n\n# reshape\n# ValueError: Found array with dim 3. StandardScaler expected <= 2\nx_train = x_train.reshape(430, 5*5)\nx_test = x_test.reshape(185, 5*5)\n\nprint(x_train.shape) #(430, 25)\nprint(x_test.shape) #(185, 25)\n\n# scaler \nscaler = StandardScaler()\nscaler.fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\nprint(x_train[0,:])\n\n# 4. 
samsung 모델 만들기\nmodel = Sequential()\nmodel.add(Dense(64, input_shape=(25,)))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(1))\n\nmodel.summary()\n\n'''\nModel: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param #\n=================================================================\ndense (Dense) (None, 64) 1664\n_________________________________________________________________\ndense_1 (Dense) (None, 32) 2080\n_________________________________________________________________\ndense_2 (Dense) (None, 32) 1056\n_________________________________________________________________\ndense_3 (Dense) (None, 32) 1056\n_________________________________________________________________\ndense_4 (Dense) (None, 32) 1056\n_________________________________________________________________\ndense_5 (Dense) (None, 1) 33\n=================================================================\nTotal params: 6,945\nTrainable params: 6,945\nNon-trainable params: 0\n_________________________________________________________________\n'''", "sub_path": "homework/1120_samsung_predict.py", "file_name": "1120_samsung_predict.py", "file_ext": "py", "file_size_in_byte": 3707, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.load", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "639908419", "text": "#!/bin/python\n\nimport os\nimport sys\nimport json\nfrom pprint import pprint\nimport xml.etree.ElementTree as ET\nimport gzip\nfrom numpy import mean, std\nimport traceback\n\n\n# The first thing we need to do is parse the simulation file\n# to work how what nodes are neighbours of other nodes\ndef calculateNeighbours(simulation):\n\ttree = ET.parse(simulation)\n\tsimNode = tree.getroot().find(\"simulation\")\n\n\ttxrange = float(simNode.find(\"radiomedium\").find(\"transmitting_range\").text)\n\ttxrange2 = txrange * txrange\n\n\tnodeCoords = {}\n\n\tfor childNode in simNode.findall(\"mote\"):\n\t\tnodeId = None\n\t\tx = None\n\t\ty = None\n\n\t\tfor confNode in childNode.findall(\"interface_config\"):\n\t\t\tidNode = confNode.find(\"id\")\n\t\t\txNode = confNode.find(\"x\")\n\t\t\tyNode = confNode.find(\"y\")\n\n\t\t\tif idNode is not None:\n\t\t\t\tnodeId = int(idNode.text)\n\n\t\t\tif xNode is not None:\n\t\t\t\tx = float(xNode.text)\n\n\t\t\tif yNode is not None:\n\t\t\t\ty = float(yNode.text)\n\n\t\tnodeCoords[nodeId] = (x, y)\n\n\t#pprint(nodeCoords)\n\n\tdef nodeDistance2(a, 
b):\n\t\treturn (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2\n\n\tn = {}\n\n\tfor a in nodeCoords.keys():\n\t\tn[a] = []\n\n\t\tfor b in nodeCoords.keys():\n\t\t\tif a != b:\n\t\t\t\t# This maths was taken from csc-compute-neighbor-stats\n\t\t\t\tif nodeDistance2(nodeCoords[a], nodeCoords[b]) <= txrange2:\n\t\t\t\t\tn[a].append(b)\n\n\treturn n\n\n#pprint(neighbours)\n# We have now finished finding out neighbours\n\n\ndef latestValues(values, keyName, maxTime=None):\n\tresult = {}\n\n\tfor value in values:\n\n\t\tnodeId = value[keyName]\n\n\t\t# Not seen this node before\n\t\tif nodeId not in result:\n\t\t\tresult[nodeId] = value\n\n\t\t# Seen this node before\n\t\telse:\n\t\t\tstored = result[nodeId]\n\n\t\t\t# Check if this is a latter time\n\t\t\tif value[u\"clock\"] > stored[u\"clock\"]:\n\t\t\t\tif maxTime is None or value[u\"clock\"] <= maxTime:\n\t\t\t\t\t# Newer so update\n\t\t\t\t\tresult[nodeId] = value\n\n\treturn result\n\ndef totalSentRecv(moteResults):\n\trx = 0\n\ttx = 0\n\tfor value in moteResults.values():\n\t\trx += value[u\"rx\"]\n\t\ttx += value[u\"tx\"]\n\n\treturn {u\"rx\": rx, u\"tx\": tx}\n\n# Evaluates the following predicate\n# This can be used to get the expected result\n#using Neighbours(1) as onehopn in\n#\t\t@(a : onehopn ~\n#\t\t\tslot(a) != slot(this) &\n#\t\t\t@(b : onehopn ~ addr(a) != addr(b) => slot(a) != slot(b))\n#\t\t)\ndef predicate1(this, neighbours, slots):\n\n\tonehopn = neighbours[this]\n\n\tresult = True\n\tfor a in onehopn:\n\t\tresult &= slots[a] != slots[this]\n\t\tfor b in onehopn:\n\t\t\tresult &= (a == b or slots[a] != slots[b])\n\treturn result\n\t\n#using Neighbours(3) as twohopn in\n#\t\t@(a : twohopn ~\n#\t\t\tslot(a) != slot(this)\n#\t\t)\ndef predicate2(this, neighbours, slots):\n\n\ttwohopn = set()\n\t\n\tfor n in neighbours[this]:\n\t\ttwohopn.add(n)\n\t\ttwohopn.update(neighbours[n])\n\n\tresult = True\n\tfor a in twohopn:\n\t\tresult &= slots[a] != slots[this]\n\treturn result\n\nclass AnalyseFile:\n\tdef __init__(self, path, neighbours, predicate):\n\t\twith gzip.open(path, 'rb') as f:\n\t\t\tself.data = json.load(f)\n\t\t\t\n\t\tself.motes = self.data[u\"motes\"]\n\n\t\t# Extract the last energy results\n\t\tself.rime = latestValues(self.data[u\"stats\"][u\"rime\"], u\"S\")\n\t\tself.energy = latestValues(self.data[u\"stats\"][u\"energy\"], u\"E\")\n\t\tself.TDMA = latestValues(self.data[u\"stats\"][u\"TDMA\"], u\"STDMA\")\n\n\t\t# Calculate how much energy predicate evaulation required\n\t\tself.pe = {}\n\t\tfor mote in self.motes:\n\t\t\tresult = {}\n\n\t\t\ttotal = self.rime[mote]\n\t\t\ttdma = self.TDMA[mote]\n\n\t\t\tfor key in (u\"tx\", u\"rx\"):\n\t\t\t\tresult[key] = total[key] - tdma[key]\n\n\t\t\tself.pe[mote] = result\n\n\t\t# Calculate totals\n\t\tself.rimeTotal = totalSentRecv(self.rime)\n\t\tself.TDMATotal = totalSentRecv(self.TDMA)\n\t\tself.peTotal = totalSentRecv(self.pe)\n\n\t\t# Predicate evaluation analysis\n\t\tself.responsesReachedSink = 0\n\t\tself.totalPredicatesSent = 0\n\n\t\tself.totalPredicates = 0\n\n\t\tself.predicatesFailed = 0\n\t\tself.predicatesSucceeded = 0\n\n\t\tself.predicatesCorrectlyEvaluated = 0\n\t\tself.predicatesIncorrectlyEvaluated = 0\n\t\t\n\t\tisGlobal = self.data[u\"peType\"] in (u\"pege\", u\"pegp\")\n\t\t\n\t\tfor pred in self.data[u\"predicate\"]:\n\t\t\ton = int(pred[u\"on\"])\n\t\t\tnode = int(str(pred[u\"node\"]).split(\".\")[0])\n\t\t\tresult = int(pred[u\"result\"])\n\t\t\t\n\t\t\tif isGlobal:\n\t\t\t\tif result == 0:\n\t\t\t\t\tself.responsesReachedSink += 
1\n\t\t\t\t\tself.totalPredicatesSent += 1\n\t\t\t\t\tself.predicatesFailed += 1\n\t\t\t\telse:\n\t\t\t\t\tself.predicatesSucceeded += 1\n\t\t\t\n\t\t\t\t# Lets now evaluate the predicate ourselves\n\t\t\t\tr = predicate(node, neighbours, self.dataAt(pred[u\"clock\"]))\n\n\t\t\t\tif (r == (result == 1)):\n\t\t\t\t\tself.predicatesCorrectlyEvaluated += 1\n\t\t\t\telse:\n\t\t\t\t\tself.predicatesIncorrectlyEvaluated += 1\n\t\t\t\t\t\n\t\t\t\tself.totalPredicates += 1\n\t\t\t\n\t\t\telse:\n\t\t\t\t# Reached the sink if they got to node 1\n\t\t\t\t# which was the sink\n\t\t\t\tif on == 1:\n\t\t\t\t\tself.responsesReachedSink += 1\n\t\t\t\t\n\t\t\t\t# Count only the predicates that were printed out from\n\t\t\t\t# the origin, not after they were preprinted at the sink\n\t\t\t\tif on != 1 or node == 1:\n\n\t\t\t\t\tif result == 0:\n\t\t\t\t\t\tself.totalPredicatesSent += 1\n\t\t\t\t\t\tself.predicatesFailed += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.predicatesSucceeded += 1\n\n\t\t\t\t\t# Lets now evaluate the predicate ourselves\n\t\t\t\t\tr = predicate(node, neighbours, self.dataAt(pred[u\"clock\"]))\n\n\t\t\t\t\tif (r == (result == 1)):\n\t\t\t\t\t\tself.predicatesCorrectlyEvaluated += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.predicatesIncorrectlyEvaluated += 1\n\t\t\t\t\t\t\n\t\t\t\t\tself.totalPredicates += 1\n\t\n\t\tself.responsesReachedSinkPC = float(self.responsesReachedSink) / float(self.totalPredicatesSent)\n\t\t\n\t\tself.successRate = float(self.predicatesSucceeded) / float(self.totalPredicates)\n\t\tself.failureRate = float(self.predicatesFailed) / float(self.totalPredicates)\n\n\t\tself.pcCorrectlyEvaluated = float(self.predicatesCorrectlyEvaluated) / float(self.totalPredicates)\n\n\n\t# Gets the slot value of a given node at the given time\n\tdef dataAt(self, time):\n\t\treturn {\n\t\t\tnodeId: nodeData[u\"slot\"]\n\t\t\tfor (nodeId, nodeData)\n\t\t\tin latestValues(self.data[u\"stats\"][u\"TDMA\"], u\"STDMA\", time).items()\n\t\t}\n\n\n\ndef meanStdAttr(items, attrName):\n\treturn (mean([getattr(x, attrName) for x in items]), std([getattr(x, attrName) for x in items]))\n\ndef meanStdAttrXx(items, attrName, X):\n\treturn (mean([getattr(x, attrName)[X] for x in items]), std([getattr(x, attrName)[X] for x in items]))\n\ndef meanStdAttrTxRx(items, attrName):\n\treturn {\"rx\": meanStdAttrXx(items, attrName, u\"rx\"), \"tx\": meanStdAttrXx(items, attrName, u\"tx\")}\n\nresults = {}\n\nfor peType in os.listdir('TDMA'):\n\tresults[peType] = {}\n\n\tfor predicateDist in os.listdir('TDMA/' + peType):\n\t\tresults[peType][predicateDist] = {}\n\t\t\n\t\tpredicate = None\n\t\tif predicateDist == '1HOP':\n\t\t\tpredicate = predicate1\n\t\telif predicateDist == '2HOP':\n\t\t\tpredicate = predicate2\n\t\telse:\n\t\t\traise Exception('Unknown {0}'.format(predicateDist))\n\n\t\tfor size in os.listdir('TDMA/' + peType + \"/\" + predicateDist):\n\t\t\tresults[peType][predicateDist][size] = {}\n\n\t\t\tneighbours = calculateNeighbours(\n\t\t\t\t'TDMA/' + peType + \"/\" + predicateDist + \"/\" + size + \"/TDMA.csc\")\n\t\t\t\t\n\t\t\tpprint(neighbours)\n\n\t\t\tfor period in os.listdir('TDMA/' + peType + \"/\" + predicateDist + \"/\" + size):\n\n\t\t\t\tif not os.path.isdir('TDMA/' + peType + \"/\" + predicateDist + \"/\" + size + \"/\" + period):\n\t\t\t\t\tcontinue\n\n\t\t\t\tresults[peType][predicateDist][size][period] = {}\n\n\t\t\t\tpath = 'TDMA/' + peType + \"/\" + predicateDist + \"/\" + size + \"/\" + period\n\n\t\t\t\tlocalResults = []\n\n\t\t\t\tfor resultsFile in 
os.listdir(path):\n\n\t\t\t\t\tprint(path + \"/\" + resultsFile)\n\n\t\t\t\t\ttry:\n\n\t\t\t\t\t\ta = AnalyseFile(path + \"/\" + resultsFile, neighbours, predicate)\n\n\t\t\t\t\t\tlocalResults.append(a)\n\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\ttraceback.print_exc()\n\n\t\t\t\t# We need to find the average and standard deviation\n\n\t\t\t\tresults[peType][predicateDist][size][period][\"pcResponsesReachedSink\"] = meanStdAttr(localResults, \"responsesReachedSinkPC\")\n\t\t\t\t#results[peType][size][\"pcSuccessRate\"] = meanStdAttr(localResults, \"successRate\")\n\t\t\t\tresults[peType][predicateDist][size][period][\"pcCorrectlyEvaluated\"] = meanStdAttr(localResults, \"pcCorrectlyEvaluated\")\n\n\t\t\t\tresults[peType][predicateDist][size][period][\"messagesPE\"] = meanStdAttrTxRx(localResults, \"peTotal\")\n\t\t\t\tresults[peType][predicateDist][size][period][\"messagesTDMA\"] = meanStdAttrTxRx(localResults, \"TDMATotal\")\n\t\t\t\tresults[peType][predicateDist][size][period][\"messagesTotal\"] = meanStdAttrTxRx(localResults, \"rimeTotal\")\n\n\npprint(results)\n\n\n# Done with the processing of results, now lets generate some graph files\n\n\n# Some utility functions\n# From: http://ginstrom.com/scribbles/2007/09/04/pretty-printing-a-table-in-python/\ndef get_max_width(table, index):\n\t\"\"\"Get the maximum width of the given column index.\"\"\"\n\n\treturn max([len(str(row[index])) for row in table])\n\t\n# From: http://ginstrom.com/scribbles/2007/09/04/pretty-printing-a-table-in-python/\ndef pprint_table(out, table):\n\t\"\"\"Prints out a table of data, padded for alignment\n\t@param out: Output stream (file-like object)\n\t@param table: The table to print. A list of lists.\n\tEach row must have the same number of columns.\"\"\"\n\n\tcol_paddings = []\n\n\tfor i in range(len(table[0])):\n\t\tcol_paddings.append(get_max_width(table, i))\n\n\tfor row in table:\n\t\t# left col\n\t\tout.write(str(row[0]).ljust(col_paddings[0] + 1))\n\t\t\n\t\t# rest of the cols\n\t\tfor i in range(1, len(row)):\n\t\t\tout.write(str(row[i]).rjust(col_paddings[i] + 2))\n\t\t\n\t\tout.write('\\n')\n\n# From: http://stackoverflow.com/questions/273192/python-best-way-to-create-directory-if-it-doesnt-exist-for-file-write\ndef ensureDir(f):\n\td = os.path.dirname(f)\n\tif not os.path.exists(d):\n\t\tos.makedirs(d)\n\t\t\ndef keyToDirName(period, bytecode, about, accessor):\n\treturn 'Graphs/' + period + '/' + bytecode + '/' + about + '/' + ('' if accessor is None else accessor + '/')\n\t\t\ndef graph(allvalues, title, labelX, labelY, keyName, accessorKey=None, rangeY=None, keypos='right top', kind='pdf'):\n\n\trearranged = {}\n\n\tfor (algorithm, items0) in allvalues.items():\n\t\tfor (bytecode, items1) in items0.items():\n\t\t\tfor (size, items2) in items1.items():\n\t\t\t\tfor (period, items3) in items2.items():\n\t\t\t\t\tdirName = keyToDirName(period, bytecode, keyName, accessorKey)\n\t\t\t\t\t\n\t\t\t\t\t# Ensure that the dir we want to put the files in\n\t\t\t\t\t# actually exists\n\t\t\t\t\tensureDir(dirName)\n\t\t\t\t\n\t\t\t\t\tif accessorKey is None:\n\t\t\t\t\t\trearranged.setdefault((period, bytecode, keyName, None), {}).setdefault(int(size), {})[algorithm] = items3[keyName]\n\t\t\t\t\telse:\n\t\t\t\t\t\trearranged.setdefault((period, bytecode, keyName, accessorKey), {}).setdefault(int(size), {})[algorithm] = items3[keyName][accessorKey]\n\t\t\t\t\n\tpprint(rearranged)\n\t\n\tfor (key, values) in rearranged.items():\n\t\tdirName = keyToDirName(*key)\n\t\t\n\t\tsizes = 
list(sorted(values.keys()))\n\t\t\n\t\tvarying = {x for item in values.values() for x in item.keys()}\n\t\t\n\t\t# Write our data\n\t\tdatFileName = dirName + 'graph.dat'\n\t\twith open(datFileName, 'w') as datFile:\n\n\t\t\ttable = [ ['#Size'] + ['Value', 'StdDev']*len(varying) ]\n\t\t\t\n\t\t\t# We want to print out rows in the correct\n\t\t\t# size order, so iterate through sizes this way\n\t\t\tfor size in sizes:\n\t\t\t\trow = [ size ]\n\t\t\t\tfor vary in varying:\n\t\t\t\t\tif vary in values[size]:\n\t\t\t\t\t\trow += [ values[size][vary][0], values[size][vary][1] ]\n\t\t\t\t\telse:\n\t\t\t\t\t\trow += [ '?', '?' ]\n\t\t\t\t\t\n\t\t\t\ttable.append( row )\n\t\t\t\n\t\t\tpprint_table(datFile, table)\n\t\n\t\t# Write out the graph definition file\n\t\tpFileName = dirName + 'graph.p'\n\t\twith open(pFileName, 'w') as pFile:\n\n\t\t\tpFile.write('set xlabel \"{0}\"\\n'.format(labelX))\n\t\t\tpFile.write('set ylabel \"{0}\"\\n'.format(labelY))\n\t\t\tpFile.write('set pointsize 1\\n')\n\t\t\tpFile.write('set key {0}\\n'.format(keypos))\n\t\t\tpFile.write('set title \"{0}\"\\n'.format(title))\n\n\t\t\t# Should remain the same as we are testing with\n\t\t\t# a limited sized grid of nodes\n\t\t\tpFile.write('set xrange [{0}:{1}]\\n'.format(min(sizes) - 1, max(sizes) + 1))\n\t\t\tpFile.write('set xtics ({0})\\n'.format(','.join(map(str, sizes))))\n\n\t\t\tif rangeY is not None:\n\t\t\t\tpFile.write('set yrange [{0}:{1}]\\n'.format(rangeY[0], rangeY[1]))\n\t\t\telse:\n\t\t\t\tpFile.write('set yrange auto\\n')\n\t\t\t\t\n\t\t\tpFile.write('set ytics auto\\n')\n\t\t\t\n\t\t\tif kind == 'pdf':\n\t\t\t\tpFile.write('set terminal pdf enhanced\\n')\n\t\t\t\tpFile.write('set output \"graph.pdf\" \\n')\n\t\t\telif kind == 'ps':\n\t\t\t\tpFile.write('set terminal postscript enhanced 22\\n')\n\t\t\t\tpFile.write('set output \"graph.ps\"\\n')\n\t\t\telse:\n\t\t\t\tpFile.write('set terminal postscript eps enhanced 22\\n')\n\t\t\t\tpFile.write('set output \"graph.eps\"\\n')\n\t\t\t\n\t\t\tpFile.write('plot ')\n\t\t\t\n\t\t\tfor (i, vary) in enumerate(varying):\n\t\t\t\n\t\t\t\tvalueIndex = 2 * (i + 1)\n\t\t\t\tstddevIndex = valueIndex + 1\n\t\t\t\n\t\t\t\tpFile.write('\"graph.dat\" u 1:{1}:{2} w errorlines ti \"{0}\"'.format(vary, valueIndex, stddevIndex))\n\t\t\t\t\n\t\t\t\tif i + 1 != len(varying):\n\t\t\t\t\tpFile.write(',\\\\\\n')\n\t\t\t\n\t\t\tpFile.write('\\n')\n\t\t\ngraph(results, 'Predicates Correctly Evaluated', 'Network Size', 'Percentage Correctly Evaluated', 'pcCorrectlyEvaluated', rangeY=(0, 1), keypos='right bottom')\n\ngraph(results, 'Response Reached Sink', 'Network Size', 'Percentage Correctly Evaluated', 'pcResponsesReachedSink', rangeY=(0, 1))\n\ngraph(results, 'PE Tx', 'Network Size', 'Messages Sent', 'messagesPE', accessorKey='tx', rangeY=(0, '*'), keypos='right bottom')\ngraph(results, 'PE Rx', 'Network Size', 'Messages Received', 'messagesPE', accessorKey='rx', rangeY=(0, '*'), keypos='right bottom')\n\n", "sub_path": "Results/analyse.py", "file_name": "analyse.py", "file_ext": "py", "file_size_in_byte": 13090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 16, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 16, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 136, "usage_type": "call"}, {"api_name": "json.load", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 246, "usage_type": 
"call"}, {"api_name": "numpy.std", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 249, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 256, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 259, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 270, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 276, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 278, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path", "line_number": 280, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 289, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 300, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 313, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 350, "usage_type": "call"}, {"api_name": "os.path", "line_number": 350, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path", "line_number": 351, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 352, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 376, "usage_type": "call"}]} +{"seq_id": "75048774", "text": "import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nbirddata = pd.read_csv(\"bird_tracking.csv\")\r\nbirddata.info()\r\n\r\n# get the indices\r\nix = birddata.bird_name == 'Eric'\r\nbirddata[ix]\r\n\r\nx, y = birddata.longitude[ix], birddata.latitude[ix]\r\nplt.figure(figsize=(10,10))\r\nplt.plot(x, y, \".\")\r\n\r\n# Unique names of the birds\r\nbird_names = pd.unique(birddata.bird_name)\r\n\r\n# Plot for all 3 birds\r\nplt.figure(figsize=(10, 10))\r\nfor bird_name in bird_names:\r\n # get indices of the given bird\r\n ix = birddata.bird_name == bird_name\r\n # get lat/long for the extracted indices of the current bird\r\n x, y = birddata.longitude[ix], birddata.latitude[ix]\r\n # also add the label for each bird\r\n plt.plot(x, y, \".\", label=bird_name)\r\nplt.xlabel(\"Longitude\")\r\nplt.ylabel(\"Latitude\")\r\nplt.legend(loc=\"lower right\")\r\n\r\n# SPEED DATA\r\n\r\n# For now, just for Eric\r\nix = birddata.bird_name == \"Eric\"\r\nspeed = birddata.speed_2d[ix]\r\n#now let's plot a histogram\r\nplt.hist(speed)\r\n# You get an error or not all the values are plotted. If you examine the data, you'll see NaN values\r\n# Look fof NaN. 
There are some true's if you look\r\nnp.isnan(speed)\r\n# or we can use the any method\r\nnp.isnan(speed).any()\r\n# let's find out how many/ True = 1 false = 0\r\nnp.sum(np.isnan(speed))\r\n# 85\r\n# Data boolean vector is return by isnan\r\nind = np.isnan(speed)\r\n~ind\r\nix = birddata.bird_name == \"Eric\"\r\nspeed = birddata.speed_2d[ix]\r\nind = np.isnan(speed)\r\n#now let's plot a histogram\r\nplt.hist(speed[~ind])\r\n\r\nplt.figure(figsize=(8,4))\r\n\r\n\r\n", "sub_path": "Week3CaseStudy5TrackGPSBirdData.py", "file_name": "Week3CaseStudy5TrackGPSBirdData.py", "file_ext": "py", "file_size_in_byte": 1544, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "pandas.unique", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "459181923", "text": "import argparse\nimport json, os\nimport jsonlist\nfrom functools import reduce\n\ndef clean_claim(claim_text_lis):\n \"\"\"\n Returns a list of claims with metadata removed.\n Don't call multiple times, on same lis.\n \"\"\"\n clean_claims = []\n for claim_text in claim_text_lis:\n if claim_text is not None:\n clean_claim = ' '.join(claim_text.split()[1:]).strip(' ') #Remove initial number\n if clean_claim!='':\n clean_claims.append( clean_claim ) \n \n return clean_claims\n\ndef clean_premise(premise_lis):\n \"\"\"\n Returns a list of premises with meta data removed.\n \"\"\"\n if type(premise_lis) is dict:\n premise_lis = 
reduce(lambda x, y: x+y, [v if v is not None else [] for k,v in premise_lis.items()], [])\n clean_premises = []\n for lis in premise_lis:\n if lis is not None:\n clean_premises += clean_claim(lis)\n return clean_premises\n\ndef mark_comment(comment, claim_lis=None, premise_lis=None):\n \"\"\"\n Adds / tags to comment.\n \"\"\"\n comment = ' '.join(comment.split(' '))\n\n comment = ' '+comment+' '\n \n if claim_lis is not None:\n for claim in claim_lis:\n claim = ' '.join(claim.split(' '))\n claim = claim.strip(' ')\n print(\"Replacing CLAIM : \", claim)\n comment = comment.replace(claim, ''+claim+'')\n \n if premise_lis is not None:\n for premise in premise_lis:\n premise = ' '.join(premise.split(' '))\n premise = premise.strip(' ')\n print(\"Replacing PREMISE : \", premise)\n comment = comment.replace(premise, ''+premise+'')\n \n return comment[1:-1]\n\ndef format_annotation(annotation, post_tree):\n \"\"\"\n Modifies annotation to add claim and premise tags and returns xml.\n \"\"\"\n xml_out = ''\n comment_ids = [elem['id'] for elem in post_tree['comments']]\n \n comment1_id = annotation['Comment1']\n comment2_id = annotation['Comment2']\n \n #Preparing XML for Comment 1\n if comment1_id in comment_ids:\n cur_comment = post_tree['comments'][comment_ids.index(comment1_id)]\n if 'ann_claim_premise' not in cur_comment:\n cur_comment['ann_claim_premise'] = mark_comment(cur_comment['body'],\n clean_claim(annotation['Claim1']) if 'Claim1' in annotation else None,\n clean_premise(annotation['Premise1']) if 'Premise1' in annotation else None)\n \n xml_out += ''+cur_comment['ann_claim_premise']+''\n \n elif comment1_id == post_tree['id']:\n if 'ann_claim_premise' not in post_tree:\n post_tree['ann_claim_premise'] = mark_comment(post_tree['selftext'],\n clean_claim(annotation['Claim1']) if 'Claim1' in annotation else None,\n clean_premise(annotation['Premise1']) if 'Premise1' in annotation else None)\n\n xml_out += ''+post_tree['ann_claim_premise']+''\n \n else:\n raise AssertionError(\"Comment id : \", comment1_id, \" not found in the post tree : \", post_tree)\n \n #Preparing XML for Comment 2\n if comment2_id in comment_ids:\n cur_comment = post_tree['comments'][comment_ids.index(comment2_id)]\n \n if 'ann_claim_premise' not in cur_comment:\n cur_comment['ann_claim_premise'] = mark_comment(cur_comment['body'],\n clean_claim(annotation['Claim2']) if 'Claim2' in annotation else None,\n clean_premise(annotation['Premise2']) if 'Premise2' in annotation else None)\n \n xml_out += ''+cur_comment['ann_claim_premise']+''\n else:\n raise AssertionError(\"Comment id : \", comment2_id, \" not found in the post tree : \", post_tree)\n \n return xml_out\n\ndef get_next_file_name(write_dir):\n i = 0\n file_name = os.path.join(write_dir, str(i)+'.xml')\n while True:\n while os.path.isfile(file_name):\n i+=1\n file_name = os.path.join(write_dir, str(i)+'.xml')\n yield file_name\n\ndef write_xml(thread_xml, write_dir, file_name_iter):\n xml_content = \"\"\" \\n\"\"\"\n for elem in thread_xml:\n xml_content+=(elem+'\\n')\n xml_content+=\"\"\n\n with open(next(file_name_iter), 'w') as f:\n f.write(xml_content)\n\nif __name__ == '__main__' :\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--json_file', type=str, help='Json file with annotations to be converted to XML file')\n parser.add_argument('--reddit_file', type==str, help='Jsonlist File with reddit comments; in the format of data of https://chenhaot.com/pages/changemyview.html.\\\n This file will be searched for comments matching those in 
json_file')\n parser.add_argument('--write_dir', type=str, help='Directory to which the program should write the generated xml files.')\n \n args = parser.parse_args()\n \n file_name_iter = get_next_file_name(args.write_dir)\n \n with open(args.json_file, 'r') as f:\n ft_data = json.load(f)\n\n print(\"Loaded finetuning data\")\n train_data = jsonlist.load_file(jsonlist.load_file(args.reddit_file))\n\n annotations = []\n for key in ft_data.keys():\n for k, annotation in ft_data[key].items():\n annotations.append(annotation)\n annotations = annotations[:-10] #Remove Last 10 annotations, they have discrepancy b/w Claim1 and Claim2\n\n post_ids = [elem['id'] for elem in train_data]\n\n post_comment_ids = [ elem['id'] for elem in train_data ]\n parent_post_ids = { elem : elem for elem in post_comment_ids }\n for elem in train_data:\n for c in elem['comments']:\n post_comment_ids += [ c['id'] ]\n parent_post_ids[c['id']] = elem['id']\n\n\n i = 0\n while i', views.remove_from_cart, name=\"remove\"),\n path('clear/', views.clear_cart, name=\"clear\")\n]\n", "sub_path": "django-ecommerce/cart/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 311, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "cart.views.cart_details", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cart.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "cart.views.add_to_cart", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cart.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "cart.views.remove_from_cart", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cart.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "cart.views.clear_cart", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cart.views", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "116198181", "text": "###############################################################################\n# Mini Web Server\n#\n# Created by Zerynth Team 2015 CC\n# Authors: G. Baldi, D. 
Mazzei\n###############################################################################\n# webserver+weather multiple threads\n#\n# Thanks to Zerynth Team 2015 CC\n# Author: Vikas Reddy @ vikaskichik@gmail.com\n#\n# Device: ESP32 Dev Kit V1\n###############################################################################\n\n\n# import streams & socket\nimport streams\nimport socket\n\n# import json parser, will be needed later\nimport json\n\n# import the wifi interface\nfrom wireless import wifi\n\n# import the http module\nimport requests\n\n# the wifi module needs a networking driver to be loaded\n# in order to control the board hardware.\n# FOR THIS EXAMPLE TO WORK, A NETWORK DRIVER MUST BE SELECTED BELOW\n\n# uncomment the following line to use the CC3000 driver (Particle Core or CC3000 Wifi shields)\n# from texas.cc3000 import cc3000 as wifi_driver\n\n# uncomment the following line to use the BCM43362 driver (Particle Photon)\n# from broadcom.bcm43362 import bcm43362 as wifi_driver\nfrom espressif.esp32net import esp32wifi as wifi_driver\n\n#Start serial port\nstreams.serial()\n\n#Adding html file as a new resource and use it\nnew_resource(\"index.html\")\n\nprint(\"Device boot up Success\")\n\n# init the wifi driver!\n# The driver automatically registers itself to the wifi interface\n# with the correct configuration for the selected board\nwifi_driver.auto_init()\n# use the wifi interface to link to the Access Point\n# change network name, security and password as needed\nprint(\"Connecting to Wifi Access Point...\")\ntry:\n # FOR THIS EXAMPLE TO WORK, \"Network-Name\" AND \"Wifi-Password\" MUST BE SET\n # TO MATCH YOUR ACTUAL NETWORK CONFIGURATION\n wifi.link(\"Wifi-AP\",wifi.WIFI_WPA2,\"password\")\n\nexcept Exception as e:\n print(\"ooops, something wrong while conecting to Wifi Access Point:(\", e)\n while True:\n sleep(1000)\n\n# Yes! we are connected\nprint(\"Connected to Wifi!\")\n\n# Let's print our ip, it will be needed soon\ninfo = wifi.link_info()\nprint(\"My IP is:\",info[0])\n\n#Weather api key\napi_key = \"Get api key from openweathermap.org\"\n\n#Have global varable for storing weather information\nglobal Winfo\nWinfo = {\"Weather\":\"NA\",\"Temperature\":\"NA\",\"Pressure\":\"NA\",\"Humidity\":\"NA\",\"Windspeed\":\"NA\",\"Clouds\": \"NA\",\"Rainfall\":\"NA\"}\n\n#Weather Thread function\ndef weatherThread(tdelay):\n print()\n print(\"+++++++++++++++++++++++start Weather thread+++++++++++++++++++++++\")\n while True:\n for j in range(3):\n try:\n print(\"#############################################################\")\n print(\"Retrieving Hyderabad,IN weather information-----> \")\n # to get weather info you need to specify a correct api url\n # there are a lot of different urls with different functions\n # they are all documented here http://openweathermap.org/api\n # let's put the http query parameters in a dict\n params = {\n \"APPID\":api_key,\n \"q\":\"Hyderabad,IN\" # <----- here it goes your city\n }\n\n # the following url gets weather information in json based on the name of the city\n url=\"http://api.openweathermap.org/data/2.5/weather\"\n # url resolution and http protocol handling are hidden inside the requests module\n response = requests.get(url,params=params)\n # if we get here, there has been no exception, exit the loop\n break\n except Exception as e:\n print(\"Oops couldn't connect o weather api. 
Try next time :(\",e)\n try:\n # check status and print the result\n if response.status==200:\n print(\"Got new weather value!!\")\n print(\"-------------\")\n # it's time to parse the json response\n js = json.loads(response.content)\n # super easy!\n Winfo[\"Weather\"] = js[\"weather\"][0][\"description\"]\n Winfo[\"Temperature\"] = js[\"main\"][\"temp\"]-273\n Winfo[\"Pressure\"] = js[\"main\"][\"pressure\"]\n Winfo[\"Humidity\"] = js[\"main\"][\"humidity\"]\n Winfo[\"Windspeed\"] = js[\"wind\"][\"speed\"]\n Winfo[\"Clouds\"] = js[\"clouds\"][\"all\"]\n\n print(\"Weather: \",Winfo[\"Weather\"])\n print(\"Temperature: \",Winfo[\"Temperature\"],\"degrees\")\n print(\"Pressure: \",Winfo[\"Pressure\"],\"hPa\")\n print(\"Humidity: \",Winfo[\"Humidity\"],\"%\")\n print(\"Wind speed: \",Winfo[\"Windspeed\"],\"meter/sec\")\n print(\"Clouds:\",Winfo[\"Clouds\"],\"%\")\n print(\"#############################################################\")\n except Exception as e:\n print(\"Oops dint retreive weather value. Try next time :(\",e)\n\n #thread delay\n sleep(tdelay)\n\n\n#Start weather thread function for every 10 mins\nthread(weatherThread,600000)\n\n# Now let's create a socket and listen for incoming connections on port 80\nsock = socket.socket()\nsock.bind(80)\nsock.listen()\n\n#Webserver Thread function\ndef webserverThread():\n print(\"start Webserver thread\")\n while True:\n try:\n # Type in your browser the board ip!\n print(\"+++++++++++++++++++++++Waiting for Http connection+++++++++++++++++++++++\")\n # here we wait for a connection\n clientsock,addr = sock.accept()\n print(\"Incoming connection from\",addr)\n\n # yes! a connection is ready to use\n # first let's create a SocketStream\n # it's like a serial stream, but with a socket underneath.\n # This way we can read and print to the socket\n client = streams.SocketStream(clientsock)\n\n # let's read all the HTTP headers from the browser\n # stop when a blank line is received\n line = client.readline()\n while line!=\"\\n\" and line!=\"\\r\\n\":\n line = client.readline()\n print(\"HTTP request received!\")\n\n f = open(\"resource://index.html\",'r')\n\n html_response = \"HTTP/1.1 200 OK \\r\\n\"\n html_response += \"Content-Type: text/html\\r\\n\"\n html_response += \"Content-Length: \"+str(f.size)+\"\\r\\n\"\n html_response += \"Connection: close \\r\\n\\r\\n\"\n\n clientsock.send(html_response)\n\n line = f.readline()\n while line:\n line = line.replace('rNum',str(random(0,1000)))\n line = line.replace('qqq',str(Winfo[\"Weather\"]))\n line = line.replace('www',str(Winfo[\"Temperature\"]))\n line = line.replace('eee',str(Winfo[\"Pressure\"]))\n line = line.replace('rrr',str(Winfo[\"Humidity\"]))\n line = line.replace('ttt',str(Winfo[\"Windspeed\"]))\n line = line.replace('yyy',str(Winfo[\"Clouds\"]))\n\n clientsock.send(line)\n line = f.readline()\n\n clientsock.close()\n except Exception as e:\n print(\"ooops, something wrong while serving the http request\",e)\n\n#Start weather thread function\nthread(webserverThread)\n \n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7317, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "streams.serial", "line_number": 41, "usage_type": "call"}, {"api_name": "espressif.esp32net.esp32wifi.auto_init", "line_number": 51, "usage_type": "call"}, {"api_name": "espressif.esp32net.esp32wifi", "line_number": 51, "usage_type": "name"}, {"api_name": "wireless.wifi.link", "line_number": 58, 
"usage_type": "call"}, {"api_name": "wireless.wifi", "line_number": 58, "usage_type": "name"}, {"api_name": "wireless.wifi.WIFI_WPA2", "line_number": 58, "usage_type": "attribute"}, {"api_name": "wireless.wifi.link_info", "line_number": 69, "usage_type": "call"}, {"api_name": "wireless.wifi", "line_number": 69, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 100, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 111, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 138, "usage_type": "call"}, {"api_name": "streams.SocketStream", "line_number": 157, "usage_type": "call"}]} +{"seq_id": "176063333", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 14 18:16:54 2018\n\n@author: loktarxiao\n\"\"\"\nimport numpy as np\nfrom keras import backend as K\nimport tensorflow as tf\n\ndef dice_coef(y_true, y_pred):\n \"\"\"\n \"\"\"\n smooth = 0\n \n y_true = K.round(y_true)\n y_pred = K.round(y_pred)\n \n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n X_all = K.sum(y_true_f)\n Y_all = K.sum(y_pred_f)\n return (2. * intersection + smooth) / (X_all + Y_all + smooth)\n\ndef dilation_img(tensor):\n kernel = tf.ones([5, 5, 1], tf.float32)\n for i in range(10):\n tensor = tf.nn.dilation2d(tensor, \n kernel, \n strides=[1, 1, 1, 1],\n padding=\"SAME\", \n data_format=\"NHWC\", \n dilations = [1, 1, 1, 1])\n return tensor - 10\n\ndef dilation_dice_coef(y_true, y_pred):\n \"\"\"\n \"\"\"\n smooth = 0\n \n y_true = K.round(y_true)\n y_pred = K.round(y_pred)\n \n y_true = dilation_img(y_true)\n y_pred = dilation_img(y_pred)\n \n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n X_all = K.sum(y_true_f)\n Y_all = K.sum(y_pred_f)\n return (2. 
* intersection + smooth) / (X_all + Y_all + smooth)\n\ndef binary_accuracy(y_true, y_pred):\n \"\"\"\n \"\"\"\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n return tf.keras.metrics.binary_accuracy(y_true_f, y_pred_f)\n\ndef _preprocess(y_true, y_pred):\n pr = K.flatten(y_pred)\n pr = K.greater(pr, 0.5)\n pr = K.cast(pr, K.floatx())\n\n gt = K.flatten(y_true)\n gt = K.greater(gt, 0.5)\n gt = K.cast(gt, K.floatx())\n\n return gt, pr\n\ndef tp(y_true, y_pred):\n y_true, y_pred = _preprocess(y_true, y_pred)\n return K.sum(y_pred * y_true)\n\ndef fp(y_true, y_pred):\n y_true, y_pred = _preprocess(y_true, y_pred)\n return K.sum(y_pred * (1 - y_true))\n\ndef tn(y_true, y_pred):\n y_true, y_pred = _preprocess(y_true, y_pred)\n return K.sum((1 - y_pred) * (1 - y_true))\n\ndef fn(y_true, y_pred):\n y_true, y_pred = _preprocess(y_true, y_pred)\n return K.sum((1 - y_pred) * y_true)\n\ndef M_Recall(y_true, y_pred):\n \"\"\"Recall metric.\n\n Only computes a batch-wise average of recall.\n\n Computes the recall, a metric for multi-label classification of\n how many relevant items are selected.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.constant(K.epsilon()))\n return recall\n\ndef M_Precision(y_true, y_pred):\n \"\"\"Precision metric.\n\n Only computes a batch-wise average of precision.\n\n Computes the precision, a metric for multi-label classification of\n how many selected items are relevant.\n \"\"\"\n\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.constant(K.epsilon()))\n return precision\n\ndef M_F1(y_true, y_pred):\n precision = M_Precision(y_true, y_pred)\n recall = M_Recall(y_true, y_pred)\n return 2*((precision*recall)/(precision+recall+K.constant(K.epsilon())))\n\n", "sub_path": "Segmentation Networks/LinkNet/src/modules/metrics.py", "file_name": "metrics.py", "file_ext": "py", "file_size_in_byte": 3373, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "keras.backend.round", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 16, "usage_type": "name"}, {"api_name": "keras.backend.round", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 17, "usage_type": "name"}, {"api_name": "keras.backend.flatten", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 19, "usage_type": "name"}, {"api_name": "keras.backend.flatten", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 20, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 21, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 22, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 23, "usage_type": "name"}, {"api_name": "tensorflow.ones", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.dilation2d", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.nn", 
"line_number": 29, "usage_type": "attribute"}, {"api_name": "keras.backend.round", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 42, "usage_type": "name"}, {"api_name": "keras.backend.round", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 43, "usage_type": "name"}, {"api_name": "keras.backend.flatten", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 48, "usage_type": "name"}, {"api_name": "keras.backend.flatten", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 49, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 50, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 51, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 52, "usage_type": "name"}, {"api_name": "keras.backend.flatten", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 58, "usage_type": "name"}, {"api_name": "keras.backend.flatten", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 59, "usage_type": "name"}, {"api_name": "tensorflow.keras.metrics.binary_accuracy", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 60, "usage_type": "attribute"}, {"api_name": "keras.backend.flatten", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 63, "usage_type": "name"}, {"api_name": "keras.backend.greater", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 64, "usage_type": "name"}, {"api_name": "keras.backend.cast", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 65, "usage_type": "name"}, {"api_name": "keras.backend.floatx", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.backend.flatten", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 67, "usage_type": "name"}, {"api_name": "keras.backend.greater", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 68, "usage_type": "name"}, {"api_name": "keras.backend.cast", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 69, "usage_type": "name"}, {"api_name": "keras.backend.floatx", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.backend.sum", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 75, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 79, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 79, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 83, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 87, "usage_type": "name"}, {"api_name": "keras.backend.sum", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 97, "usage_type": "name"}, {"api_name": "keras.backend.round", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.backend.clip", "line_number": 97, "usage_type": "call"}, {"api_name": 
"keras.backend.sum", "line_number": 98, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 98, "usage_type": "name"}, {"api_name": "keras.backend.round", "line_number": 98, "usage_type": "call"}, {"api_name": "keras.backend.clip", "line_number": 98, "usage_type": "call"}, {"api_name": "keras.backend.constant", "line_number": 99, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 99, "usage_type": "name"}, {"api_name": "keras.backend.epsilon", "line_number": 99, "usage_type": "call"}, {"api_name": "keras.backend.sum", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 111, "usage_type": "name"}, {"api_name": "keras.backend.round", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.backend.clip", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.backend.sum", "line_number": 112, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 112, "usage_type": "name"}, {"api_name": "keras.backend.round", "line_number": 112, "usage_type": "call"}, {"api_name": "keras.backend.clip", "line_number": 112, "usage_type": "call"}, {"api_name": "keras.backend.constant", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 113, "usage_type": "name"}, {"api_name": "keras.backend.epsilon", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.backend.constant", "line_number": 119, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 119, "usage_type": "name"}, {"api_name": "keras.backend.epsilon", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "151281998", "text": "import serial\nimport time\nimport logging as log\n\nPORT = 'COM4'\nBAUD_RATE = 9600\nARDUINO_RESET_WAIT_S = 2\n\ndef main():\n log.info(\"Initializing Arduino serial communication on port {}\".format(PORT))\n serial_port = serial.Serial(PORT, BAUD_RATE)\n time.sleep(ARDUINO_RESET_WAIT_S)\n log.info(\"Serial communication initialized\")\n\n received_data = serial_port.read(serial_port.inWaiting())\n log.info(received_data)\n \n\nif __name__ == '__main__':\n main()", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 469, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.info", "line_number": 10, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 11, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "625676662", "text": "from django.contrib import admin\nfrom django.forms import ModelForm, Textarea\nfrom django.db import models\n\n# Register your models here.\nfrom blog.models import Blog\n\nclass BlogAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {\n \t'fields': ('title', 'en_title', 'sub_title', 'pub_date', 'content1', 'midtitle', 'content2')\n \t}),\n ]\n search_fields = ['title']\n formfield_overrides = {\n models.TextField: {'widget': Textarea(\n attrs={'rows': 6,\n 'cols': 140,\n 'style': 'height: 30em;'})},\n models.CharField: {'widget': Textarea(\n attrs={'rows': 6,\n 'cols': 140,\n 'style': 'height: 1em;'})},\n }\n\nadmin.site.register(Blog, BlogAdmin)\n", "sub_path": "blog/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 863, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "18", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 16, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.admin.site.register", "line_number": 26, "usage_type": "call"}, {"api_name": "blog.models.Blog", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "519910589", "text": "\"\"\" **************************************\n *\n Pomppiva neliö *\n *\n************************************** \"\"\" \nimport pygame\n\nLEVEYS = 700\nKORKEUS = 500\nMAILA = 50\nMAILAN_PAIKKA = KORKEUS-70\n\ndef kirjoitaPisteet(pisteet, loppuAika):\n teksti = \"Pisteet: %d Aika: %10.3f\" % (pisteet, loppuAika/1000)\n pisteet_teksti = fontti.render(teksti, True, teksti_vari)\n ikkuna.blit(pisteet_teksti,(50, 30))\n\ndef tarkistaReunaosuma():\n global nelio_x, nelio_y, nelio_muutos_x, nelio_muutos_y, pisteet\n\n # Jos osutaan, vaihdetaan suuntaa päinvastaiseksi kertomalla -1:llä\n if nelio_y > KORKEUS-MAILA or nelio_y < 0:\n nelio_muutos_y = nelio_muutos_y * -1\n if nelio_y > KORKEUS-MAILA: # Pohjakosketus vähentää pisteitä\n pisteet -= 1\n if nelio_x > LEVEYS-MAILA or nelio_x < 0:\n nelio_muutos_x = nelio_muutos_x * -1\n \ndef tarkistaMailaosuma():\n global nelio_x, nelio_y, maila_x, maila_y, nelio_muutos_y, osuma_aani\n # Jos osutaan, vaihdetaan suuntaa päinvastaiseksi kertomalla -1:llä\n if nelio_y + MAILA > MAILAN_PAIKKA and nelio_muutos_y > 0:\n if nelio_x + MAILA > maila_x and nelio_x < maila_x+MAILA:\n nelio_muutos_y = nelio_muutos_y * -1\n osuma_aani.play()\n\ndef siirraPaikkaa():\n global nelio_x, nelio_y, nelio_muutos_x, nelio_muutos_y\n # Siirrä paikkaa\n nelio_x += nelio_muutos_x\n nelio_y += nelio_muutos_y\n \n# Määritellään värit\nmusta = (0, 0, 0)\nvalkoinen = (255, 255, 255)\nsininen = (0, 0, 255)\nmaila_vari = (0, 125, 155)\nteksti_vari = (120,0,200)\n\n# Pygamen initialisointi\npygame.init()\n \n# Asetetaan ikkunan korkeus ja leveys\nikkunan_koko = [LEVEYS, KORKEUS]\nikkuna = pygame.display.set_mode(ikkunan_koko)\n\npygame.display.set_caption(\"Pomppiva neliö\")\n#Haetaan osumaääni\nosuma_aani = pygame.mixer.Sound(\"Boing.wav\")\n\n# Haetaan kello näytön päivitystä varten\nkello = pygame.time.Clock()\n\n# Luodaan fontti\nfontti = pygame.font.Font(None, 60)\nfontti.set_bold(True)\nfontti.set_italic(True)\n\n# Ladataan taustakuva (kuvat ovat samassa kansiossa) \ntaustakuva = pygame.image.load(\"tausta.jpg\") #Kuvan koko sama kuin ikkunan koko\ntaustakuvaRect = taustakuva.get_rect() # Haetaan kuvan suorakulmion koordinaatit\n# Ladataan pallo\npallo = pygame.image.load(\"pallo.gif\") #Pallon koko 50*50\npalloRect = pallo.get_rect()\n\n# Pisteet\npisteet = 20\n# Neliön alkupiste\nnelio_x = 50\nnelio_y = 50\n\n# Mailan alkupaikka\nmaila_x = (LEVEYS - MAILA)/2\nmaila_y = MAILAN_PAIKKA # y-koordinaatti siis 430\nmaila_muutos_x = 0\n\n# Neliön 
liikkeen nopeus(=muutos) x ja y suunnassa\nnelio_muutos_x = 4\nnelio_muutos_y = 4\n\nloppuAika = 0\n\n# Asetetaan valmis-lippu epätodeksi.\n# Pääohjelman silmukka pyörii niin kauan kuin valmis-lippu on False(=epätosi)\nvalmis = False\n\n# -------- Pääohjelman silmukka -----------\nwhile valmis == False and pisteet > 0: \n for event in pygame.event.get(): # Haetaan tapahtumat, jos niitä on\n if event.type == pygame.QUIT: # Käyttäjä klikkasi Close-komentoa\n valmis = True # Asetetaan valmis-lippu todeksi -> ohjelma loppuu\n\n # Onko käyttäjä painanut näppäintä \n elif event.type == pygame.KEYDOWN:\n # Oliko nuolinäppäin vasemmalle\n if event.key == pygame.K_LEFT:\n maila_muutos_x = -4\n # Oliko nuolinäppäin oikealle\n elif event.key == pygame.K_RIGHT:\n maila_muutos_x = 4\n # Onko käyttäjä vapauttanut näppäimen \n elif event.type == pygame.KEYUP:\n # Oliko nuolinäppäin vasemmalle\n if event.key == pygame.K_LEFT:\n maila_muutos_x = 0\n # Oliko nuolinäppäin oikealle\n elif event.key == pygame.K_RIGHT:\n maila_muutos_x = 0 \n\n # Mailan paikka\n maila_x += maila_muutos_x\n \n # Asetetaan taustakuva\n ikkuna.blit(taustakuva, taustakuvaRect)\n\n # Tarkistetaan reuna\n if maila_x > LEVEYS-MAILA or maila_x < 0:\n maila_muutos_x = 0\n \n # KirjoitaPisteet näyttöön\n kirjoitaPisteet(pisteet, loppuAika)\n\n # Piirretään maila\n pygame.draw.rect(ikkuna, maila_vari, [maila_x, maila_y, MAILA, 5])\n\n # Piirretään pallo\n ikkuna.blit(pallo, (nelio_x, nelio_y))\n\n # Siirretään paikkakoordinaatteja\n siirraPaikkaa()\n\n # Tarkistetaan ollaanko osumassa reunaan\n tarkistaReunaosuma()\n\n # Tarkistetaan osutaanko mailaan\n tarkistaMailaosuma()\n \n # Rajoitetaan päivitys 25 frameen sekunnissa\n kello.tick(25)\n loppuAika += 40\n \n # Päivitetään näyttö\n pygame.display.flip()\n# ------------- Pääohjelman silmukka loppuu tähän ----------\n\n# Lopetetaan ohjelma.\npygame.quit()\nraise SystemExit\n\n", "sub_path": "Viikko_5/harjoitus_6_funktiot.py", "file_name": "harjoitus_6_funktiot.py", "file_ext": "py", "file_size_in_byte": 4855, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pygame.init", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 99, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 100, "usage_type": "attribute"}, 
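The bounce rule in the pygame record above ("Pomppiva neliö", seq_id "519910589") reduces to negating a velocity component whenever the square crosses an axis boundary. Below is a small, pygame-free sketch of that rule; the function name step and the sample numbers are illustrative, not taken from the record.

def step(pos, vel, limit, size):
    # Advance one axis; negate the velocity once the square leaves
    # [0, limit - size], mirroring tarkistaReunaosuma() above.
    pos += vel
    if pos < 0 or pos > limit - size:
        vel = -vel
    return pos, vel

# Tiny check: a square of size 50 near the right edge of a 700-wide window.
x, dx = 648, 4
x, dx = step(x, dx, 700, 50)
assert (x, dx) == (652, -4)  # it moves past the edge once, then the direction flips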
{"api_name": "pygame.KEYDOWN", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 134, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 153, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 153, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 157, "usage_type": "call"}]} +{"seq_id": "398735993", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 2 15:26:32 2021\nhttps://datascience.stackexchange.com/questions/33338/autoencoder-for-features-selection\n@author: Ashish\n\"\"\"\n\nfrom keras.datasets import mnist\nimport numpy as np\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\n\n\nprint('Training data shape: ', x_train.shape)\nprint('Testing data shape : ', x_test.shape)\n\nimport matplotlib.pyplot as plt\n\n\ntraining_counts = [None] * 10 \ntesting_counts = [None] * 10\nfor i in range(10):\n training_counts[i] = len(y_train[y_train == i])/len(y_train)\n testing_counts[i] = len(y_test[y_test == i])/len(y_test)\n\n# the histogram of the data\ntrain_bar = plt.bar(np.arange(10)-0.2, training_counts, align='center', color = 'r', alpha=0.75, width = 0.41, label='Training')\ntest_bar = plt.bar(np.arange(10)+0.2, testing_counts, align='center', color = 'b', alpha=0.75, width = 0.41, label = 'Testing')\n\nplt.xlabel('Labels')\nplt.xticks((0,1,2,3,4,5,6,7,8,9))\nplt.ylabel('Count (%)')\nplt.title('Label distribution in the training and test set')\nplt.legend(bbox_to_anchor=(1.05, 1), handles=[train_bar, test_bar], loc=2)\nplt.grid(True)\nplt.show()\n\n# utility function for showing images\ndef show_imgs(x_test, decoded_imgs=None, n=10):\n plt.figure(figsize=(20, 4))\n for i in range(n):\n ax = plt.subplot(2, n, i+1)\n plt.imshow(x_test[i].reshape(28,28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n if decoded_imgs is not None:\n ax = plt.subplot(2, n, i+ 1 +n)\n plt.imshow(decoded_imgs[i].reshape(28,28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n plt.show()\n\nshow_imgs(x_train, x_test)\nprint('Training labels: ', y_train[0:10])\nprint('Testing labels : ', y_test[0:10])", "sub_path": "projects/autoencoder_test.py", "file_name": "autoencoder_test.py", "file_ext": "py", "file_size_in_byte": 1934, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 11, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": 
"name"}, {"api_name": "numpy.arange", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gray", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gray", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "288576090", "text": "import math\nimport matplotlib.pyplot as plt\nfrom os import listdir\nimport sys\nimport logging\nimport csv\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\nclass Node:\n def __init__(self, p):\n self.points = p\n self.right = None\n self.left = None\n\ndef to_dist_matrix(path):\n \"\"\" Convert a set of points into a distance matrix based on a certain distance measure \"\"\"\n\n onlyfiles = [path+f for f in listdir(path)]\n n = len(onlyfiles)\n dist = [[0 for x in range(n)] for y in range(n)]\n # logger.debug(\"{},{} matrix\".format(len(dist), len(dist[0])))\n i = 0\n for f in onlyfiles:\n stream = csv.reader(open(f), delimiter=\",\")\n data = [result for result in stream]\n j = 0\n for point in data:\n # logger.debug(\"{},{}\".format(i, j))\n # logger.debug(point[0])\n dist[i][j] = float(point[0])\n j += 1\n # logger.debug(j)\n i += 1\n logger.debug(\"{} / {} files read\".format(i, n))\n logger.debug(\"done reading files\")\n return dist\n\"\"\"\ndef flatten(li):\n for item in li:\n try:\n yield from flatten(item)\n except 
TypeError:\n yield item\n\"\"\"\ndef flatten(items, seqtypes=(list, tuple)):\n for i, x in enumerate(items):\n while i < len(items) and isinstance(items[i], seqtypes):\n items[i:i+1] = items[i]\n return items\n\ndef euclidistance(c1, c2):\n \"\"\" Calculate the distance between two cluster \"\"\"\n dist = .0\n c1p = flatten(c1.points)\n c2p = flatten(c2.points)\n n1 = len(c1p)\n n2 = len(c2p)\n for i in range(n1):\n for j in range(n2):\n p1 = c1p[i]\n p2 = c2p[j]\n dist = dist + math.sqrt((p1-p2)**2)\n dist = dist / (n1*n2)\n return dist\n\ndef upgma(points, k):\n \"\"\" Cluster based on distance matrix dist using Unweighted Pair Group Method with Arithmetic Mean algorithm up to k cluster\"\"\"\n # THIS HAS O(n^3)!!!\n\n # Initialize each cluster with one point\n nodes = []\n n = len(points)\n for i in range(n):\n node = Node(points[i])\n nodes = nodes + [node]\n logger.debug(\"Nodes created\")\n\n # Iterate until the number of clusters is k\n nc = n\n logger.debug(\"Starting clustering\")\n logger.debug(\"{} / {} clusters\".format(nc, k))\n while nc > k:\n # Calculate the pairwise distance of each cluster, while searching for pair with least distance\n c1 = 0\n c2 = 0\n i1 = 0\n i2 = 0\n sdis = 9999999999\n for i in range(nc):\n for j in range(i+1, nc):\n dis = euclidistance(nodes[i], nodes[j])\n if dis < sdis:\n sdis = dis\n c1 = nodes[i]\n c2 = nodes[j]\n i1 = i\n i2 = j\n logger.debug(j)\n logger.debug(\"{}/{}\".format(i, nc))\n # Merge these two nodes into one new node\n node = Node([c1.points, c2.points])\n node.left = c1\n node.right = c2\n\n # Remove the previous nodes, and add the new node\n new_nodes = []\n for i in range(nc):\n if i != i1 and i != i2:\n new_nodes = new_nodes + [nodes[i]]\n new_nodes = new_nodes + [node]\n nodes = new_nodes[:]\n nc = nc - 1\n logger.debug(\"{} / {} clusters\".format(nc, k))\n logger.debug(\"Clustering completed\")\n return nodes\n\ndef print_cluster(nodes):\n for i in range(len(nodes)):\n print (\"cluster \" + str(i))\n print (nodes[i].points)\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=\"DEBUG\")\n str(sys.argv)\n print_cluster(upgma(to_dist_matrix(sys.argv[1]), 1))\n", "sub_path": "upgma.py", "file_name": "upgma.py", "file_ext": "py", "file_size_in_byte": 3716, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 26, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 64, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 125, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 126, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 127, "usage_type": "attribute"}]} +{"seq_id": "577057295", "text": "#%%\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nveriler = pd.read_csv(\"musteriler.xls\")\r\n\r\nx = veriler.iloc[:,3:]\r\n#%%,\r\nfrom sklearn.cluster import KMeans \r\n\r\nkmeans = KMeans(n_clusters =2,init='k-means++')\r\n\r\nkmeans.fit(x)\r\n\r\n#print(kmeans.cluster_centers_)\r\n\r\nsonuclar = []\r\n\r\nfor i in range(1,10):\r\n kmeans = KMeans(n_clusters = i ,init = 'k-means++',random_state = 123)\r\n kmeans.fit(x)\r\n sonuclar.append(kmeans.inertia_)\r\n \r\nplt.plot(range(1,10),sonuclar)\r\nplt.ylabel(\"WCC değeri\")\r\nplt.show()\r\n\r\n", "sub_path": "bolum16 #K-Means 
clustering/k-means.py", "file_name": "k-means.py", "file_ext": "py", "file_size_in_byte": 549, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "641724364", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 11 13:48:41 2018\n\n@author: ext_KsPa1\n\"\"\"\n\nimport cv2\nimport sys\nimport numpy as np \nfrom imutils import contours\nfrom skimage import measure\nimport imutils\n\n#r=sys.argv[:]\n\ndef adjust_gamma(image, gamma=1.0):\n\n invGamma = 1.0 / gamma\n table = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype(\"uint8\")\n\n return cv2.LUT(image, table)\n\ndef FindSubImage(im1, im2):\n needle = im1\n haystack = im2\n\n result = cv2.matchTemplate(needle,haystack,cv2.TM_CCOEFF_NORMED)\n y,x = np.unravel_index(result.argmax(), result.shape)\n return x,y,result\n\ncap=cv2.VideoCapture('corona_test.mp4') \nimg=cv2.imread('img1.png')\ngray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nwhile(cap.isOpened()):\n ert,frame=cap.read()\n gamma = 0.03 # change the value here to get different result\n adjusted = adjust_gamma(frame, gamma=gamma)\n# frame=frame[int(r[1]):int(r[1]+r[3]),int(r[0]):int(r[0]+r[2])]\n gray = cv2.cvtColor(adjusted, cv2.COLOR_BGR2GRAY)\n# res=cv2.matchTemplate(gray,gray_img,'cv2.TM_CCOEFF')\n \n blurred = cv2.GaussianBlur(gray, (11, 11), 0)\n img_hsv=cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n thresh = cv2.threshold(blurred, 240, 255, cv2.THRESH_BINARY)[1]\n# thresh = cv2.erode(thresh, None, iterations=2)\n thresh = cv2.dilate(thresh, None, iterations=4)\n thresh = cv2.erode(thresh, None, iterations=4)\n# mask=cv2.inRange(img_hsv,np.array([0,0,100]),np.array([180,255,255])) \n x,y,res = FindSubImage(img,frame)\n print(x,y)\n cv2.rectangle(frame,(x-5,y-5),(5+x,5+y),(0,255,0),2)\n\n cv2.imshow(\"frame\",frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows() \n \n#ret, frame = cap.read()\n#r=cv2.selectROI(frame)\n#frame1=frame[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]\n#print(r)\n#sys.argv=r\n##cv2.imshow('frame',frame1)\n##cv2.waitKey(0)\n#cap.release()\n#cv2.destroyAllWindows()\n#execfile(\"C:/Users/ext_kspa1/Desktop/Corona Ring/test9.py\")\n", "sub_path": "Documents/Corona Ring old work/cam.py", "file_name": "cam.py", "file_ext": "py", "file_size_in_byte": 2059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.LUT", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.matchTemplate", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.TM_CCOEFF_NORMED", "line_number": 28, "usage_type": 
"attribute"}, {"api_name": "numpy.unravel_index", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 44, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.erode", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "528675354", "text": "import datetime\n\nimport StockAnalysisSystem.core.api as sasApi\nfrom StockAnalysisSystem.core.Utility.time_utility import *\nfrom StockAnalysisSystem.core.Utility.event_queue import Event\nfrom StockAnalysisSystem.core.SubServiceManager import SubServiceContext\n\n\nSERVICE_ID = '7129e9d2-4f53-4826-9161-c568ced52d02'\n\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nclass UpdateService:\n def __init__(self, sub_service_context: SubServiceContext):\n self.__sub_service_context = sub_service_context\n\n def startup(self):\n # DEBUG: Debug event\n # event = Event('update_service_test', SERVICE_ID)\n # event.update_event_data('update_service_test_flag', 'daily')\n # self.__sub_service_context.sub_service_manager.post_event(event)\n\n # Temporary remove auto update service\n # self.__sub_service_context.register_schedule_event(SERVICE_ID, 17, 0, 0, period='daily')\n # self.__sub_service_context.register_schedule_event(SERVICE_ID, 21, 0, 0, period='weekly')\n\n pass\n\n def handle_event(self, event: Event):\n if event.event_type() == Event.EVENT_SCHEDULE:\n if event.get_event_data().get('period', '') == 'daily':\n self.__do_daily_update()\n elif event.get_event_data().get('period', '') == 'weekly':\n # Friday\n if now_week_days() == 6:\n self.__do_weekly_update()\n elif event.event_type() == 'update_service_test':\n if event.get_event_data().get('update_service_test_flag', '') == 'daily':\n self.__do_daily_update()\n\n # ---------------------------------------------------------------------------------------\n\n def __post_daily_update(self):\n \"\"\"\n For avoiding update conflict. Post update to system queue.\n :return:\n \"\"\"\n pass\n\n def __do_daily_update(self):\n self.__sub_service_context.log('%s: Do daily update.' 
%\n datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n # DEBUG: Not update this for quick debug\n # self.__update_market_data()\n\n self.__check_update_daily_trade_data('TradeData.Stock.Daily')\n self.__check_update_daily_trade_data('TradeData.Index.Daily')\n self.__check_update_daily_trade_data('Metrics.Stock.Daily')\n\n def __do_weekly_update(self):\n pass\n\n # --------------------------------------------------------------------------\n\n def __update_market_data(self):\n if not sasApi.update('Market.SecuritiesInfo'):\n raise Exception('Market.SecuritiesInfo update error.')\n if not sasApi.update('Market.IndexInfo'):\n raise Exception('Market.IndexInfo update error.')\n if not sasApi.update('Market.TradeCalender'):\n raise Exception('Market.TradeCalender update error.')\n self.__sub_service_context.log('Market data update complete.')\n\n # --------------------------------------------------------------------------\n\n def __check_update_daily_trade_data(self, uri: str) -> bool:\n ret, last_update_time, update_days = \\\n self.__estimate_daily_trade_data_update_range(uri)\n\n if update_days == 0:\n # Newest, not need update.\n return True\n # DEBUG: Test slice update\n if update_days > 1000:\n # More than 100 days, update per each\n ret = self.__update_daily_data_trade_per_each(uri)\n return ret\n else:\n # First try to update by slice\n ret = self.__update_daily_data_trade_by_slice(uri, last_update_time)\n if not ret:\n # If update by slice fail, update per each\n ret = self.__update_daily_data_trade_per_each(uri)\n return ret\n\n def __estimate_daily_trade_data_update_range(self, uri: str) -> (bool, datetime.datetime, int):\n last_update_time = self.__sub_service_context.sas_if.sas_get_last_update_time_from_update_table(uri.split('.'))\n last_update_date = to_date(last_update_time)\n\n if last_update_date is None:\n self.__sub_service_context.log('Error last update time format: ' + str(last_update_time))\n return False, None, 0\n\n date_delta = now().date() - last_update_date\n return True, last_update_time, date_delta.days\n\n def __update_daily_data_trade_by_slice(self, uri: str, since: datetime.datetime) -> bool:\n trading_days = self.__sub_service_context.sas_if.sas_get_trading_days(since, now().date())\n if not isinstance(trading_days, list):\n return False\n if len(trading_days) <= 1:\n return True\n trading_days.pop(0)\n\n for trading_day in trading_days:\n ret = sasApi.update(uri, identity=None, time_serial=trading_day)\n if not ret:\n return False\n return True\n\n def __update_daily_data_trade_per_each(self, uri: str) -> bool:\n ret = sasApi.data_utility().auto_update(uri)\n return ret\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n\ndef plugin_prob() -> dict:\n return {\n 'plugin_id': SERVICE_ID,\n 'plugin_name': 'Update Service',\n 'plugin_version': '0.0.0.1',\n 'tags': ['Update', 'Sleepy'],\n }\n\n\ndef plugin_adapt(service: str) -> bool:\n return service == SERVICE_ID\n\n\ndef plugin_capacities() -> list:\n return [\n # 'api', # Provides functions like sys call\n # 'thread', # SubService manager will create a thread for this service\n # 'polling', # polling() function will be invoked while event processing thread is free\n 'event_handler' # SubService can handle events that dispatch to it\n ]\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n\nupdateService: UpdateService = None\nsubServiceContext: 
SubServiceContext = None\n\n\ndef init(sub_service_context: SubServiceContext) -> bool:\n try:\n global subServiceContext\n subServiceContext = sub_service_context\n\n global updateService\n updateService = UpdateService(subServiceContext)\n except Exception as e:\n import traceback\n print('Plugin-in init error: ' + str(e))\n print(traceback.format_exc())\n finally:\n pass\n return True\n\n\ndef startup() -> bool:\n updateService.startup()\n return True\n\n\ndef event_handler(event: Event, sync: bool, **kwargs):\n updateService.handle_event(event)\n\n\n\n\n", "sub_path": "StockAnalysisSystem/plugin/SubService/update_service.py", "file_name": "update_service.py", "file_ext": "py", "file_size_in_byte": 6681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "StockAnalysisSystem.core.SubServiceManager.SubServiceContext", "line_number": 15, "usage_type": "name"}, {"api_name": "StockAnalysisSystem.core.Utility.event_queue.Event", "line_number": 30, "usage_type": "name"}, {"api_name": "StockAnalysisSystem.core.Utility.event_queue.Event.EVENT_SCHEDULE", "line_number": 31, "usage_type": "attribute"}, {"api_name": "StockAnalysisSystem.core.Utility.event_queue.Event", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "attribute"}, {"api_name": "StockAnalysisSystem.core.api.update", "line_number": 68, "usage_type": "call"}, {"api_name": "StockAnalysisSystem.core.api", "line_number": 68, "usage_type": "name"}, {"api_name": "StockAnalysisSystem.core.api.update", "line_number": 70, "usage_type": "call"}, {"api_name": "StockAnalysisSystem.core.api", "line_number": 70, "usage_type": "name"}, {"api_name": "StockAnalysisSystem.core.api.update", "line_number": 72, "usage_type": "call"}, {"api_name": "StockAnalysisSystem.core.api", "line_number": 72, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 109, "usage_type": "attribute"}, {"api_name": "StockAnalysisSystem.core.api.update", "line_number": 118, "usage_type": "call"}, {"api_name": "StockAnalysisSystem.core.api", "line_number": 118, "usage_type": "name"}, {"api_name": "StockAnalysisSystem.core.api.data_utility", "line_number": 124, "usage_type": "call"}, {"api_name": "StockAnalysisSystem.core.api", "line_number": 124, "usage_type": "name"}, {"api_name": "StockAnalysisSystem.core.SubServiceManager.SubServiceContext", "line_number": 155, "usage_type": "name"}, {"api_name": "StockAnalysisSystem.core.SubServiceManager.SubServiceContext", "line_number": 158, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 168, "usage_type": "call"}, {"api_name": "StockAnalysisSystem.core.Utility.event_queue.Event", "line_number": 179, "usage_type": "name"}]} +{"seq_id": "4009611", "text": "from coord2dist import coord2dist\nfrom ortools.constraint_solver import pywrapcp\nfrom ortools.constraint_solver import routing_enums_pb2\n\ndef TSP(dist_matrix, num_routes, depot):\n \n # Distance callback\n def create_distance_callback(dist_matrix):\n # Create a callback to calculate distances between cities.\n\n def distance_callback(from_node, to_node):\n return int(dist_matrix[from_node][to_node])\n\n return distance_callback\n\n tsp_size = len(dist_matrix[0])\n \n if tsp_size > 0:\n routing = pywrapcp.RoutingModel(tsp_size, num_routes, depot)\n 
search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters()\n # Create the distance callback.\n dist_callback = create_distance_callback(dist_matrix)\n routing.SetArcCostEvaluatorOfAllVehicles(dist_callback)\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n if assignment:\n # Solution distance.\n print(\"Total distance: \" + str(assignment.ObjectiveValue()) + \" miles\\n\")\n # Display the solution.\n routes = []\n for route_number in range(routing.vehicles()):\n node = routing.Start(route_number) # Index of the variable for the starting node.\n #route_str = ''\n route = []\n \n while not routing.IsEnd(node):\n # Convert variable indices to node indices in the displayed route.\n #route_str += str(routing.IndexToNode(index)) + ' -> '\n index = routing.NodeToIndex(node)\n route.append(index)\n node = assignment.Value(routing.NextVar(node))\n #route_str += str(routing.IndexToNode(index))\n #print \"Route:\\n\\n\" + route_str\n routes.append(route)\n else:\n print('No solution found.')\n return []\n else:\n print('Specify an instance greater than 0.')\n return []\n return routes\n\nfrom random import randint\n\nR = []\nfor i in range(100):\n R += [ [randint(0,101), randint(0,101)] ]\n\n#C = [ [0,0], [100, 0], [100, 1], [0, 100], [1, 100] ]\nprint(TSP(coord2dist(R), 1, 0))\n", "sub_path": "TSP.py", "file_name": "TSP.py", "file_ext": "py", "file_size_in_byte": 2252, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "ortools.constraint_solver.pywrapcp.RoutingModel", "line_number": 19, "usage_type": "call"}, {"api_name": "ortools.constraint_solver.pywrapcp", "line_number": 19, "usage_type": "name"}, {"api_name": "ortools.constraint_solver.pywrapcp.RoutingModel.DefaultSearchParameters", "line_number": 20, "usage_type": "call"}, {"api_name": "ortools.constraint_solver.pywrapcp.RoutingModel", "line_number": 20, "usage_type": "attribute"}, {"api_name": "ortools.constraint_solver.pywrapcp", "line_number": 20, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "coord2dist.coord2dist", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "630701429", "text": "import logging\nimport numpy\nfrom fuel.datasets import MNIST\nfrom fuel.schemes import ShuffledScheme\nfrom fuel.streams import DataStream\nfrom fuel.transformers import Transformer\nfrom picklable_itertools import cycle, imap\nfrom utils import AttributeDict\n\nlogger = logging.getLogger('datasets')\n\n\ndef make_datastream(dataset, indices, batch_size,\n n_labeled=None, n_unlabeled=None,\n balanced_classes=True, whiten=None, cnorm=None,\n scheme=ShuffledScheme):\n\n # Ensure each label is equally represented\n logger.info('Balancing %d labels...' 
% n_labeled)\n all_data = dataset.data_sources[dataset.sources.index('targets')]\n y = all_data.flatten()[indices]\n n_classes = y.max() + 1\n assert n_labeled % n_classes == 0\n n_from_each_class = n_labeled // n_classes # integer division keeps the slice index an int\n\n i_labeled = []\n for c in range(n_classes):\n i = (indices[y == c])[:n_from_each_class]\n i_labeled += list(i)\n\n # Get unlabeled indices\n i_unlabeled = indices[:n_unlabeled]\n\n ds = CombinedDataStream(\n data_stream_labeled=MyTransformer(\n DataStream(dataset),\n iteration_scheme=scheme(i_labeled, batch_size)),\n data_stream_unlabeled=MyTransformer(\n DataStream(dataset),\n iteration_scheme=scheme(i_unlabeled, batch_size))\n )\n return ds\n\n\nclass MyTransformer(Transformer):\n def __init__(self, data_stream, iteration_scheme, **kwargs):\n super(MyTransformer, self).__init__(data_stream,\n iteration_scheme=iteration_scheme,\n **kwargs)\n data = data_stream.get_data(slice(data_stream.dataset.num_examples))\n shape = data[0].shape\n self.data = [data[0].reshape(shape[0], -1)]\n self.data += [data[1].flatten()]\n\n def get_data(self, request=None):\n return (s[request] for s in self.data)\n\n\nclass CombinedDataStream(Transformer):\n def __init__(self, data_stream_labeled, data_stream_unlabeled, **kwargs):\n super(Transformer, self).__init__(**kwargs)\n self.ds_labeled = data_stream_labeled\n self.ds_unlabeled = data_stream_unlabeled\n # Rename the sources for clarity\n self.ds_labeled.sources = ('features_labeled', 'targets_labeled')\n # Hide the labels.\n self.ds_unlabeled.sources = ('features_unlabeled',)\n\n @property\n def sources(self):\n if hasattr(self, '_sources'):\n return self._sources\n return self.ds_labeled.sources + self.ds_unlabeled.sources\n\n @sources.setter\n def sources(self, value):\n self._sources = value\n\n def close(self):\n self.ds_labeled.close()\n self.ds_unlabeled.close()\n\n def reset(self):\n self.ds_labeled.reset()\n self.ds_unlabeled.reset()\n\n def next_epoch(self):\n self.ds_labeled.next_epoch()\n self.ds_unlabeled.next_epoch()\n\n def get_epoch_iterator(self, **kwargs):\n unlabeled = self.ds_unlabeled.get_epoch_iterator(**kwargs)\n labeled = self.ds_labeled.get_epoch_iterator(**kwargs)\n assert type(labeled) == type(unlabeled)\n\n return imap(self.mergedicts, cycle(labeled), unlabeled)\n\n def mergedicts(self, x, y):\n return dict(list(x.items()) + list(y.items()))\n\n\ndef get_mnist_data_dict(unlabeled_samples, valid_set_size, test_set=False):\n train_set = MNIST((\"train\",))\n # Make sure the MNIST data is in the right format\n train_set.data_sources = (\n (train_set.data_sources[0] / 255.).astype(numpy.float32),\n train_set.data_sources[1])\n\n # Take all indices and permute them\n all_ind = numpy.arange(train_set.num_examples)\n rng = numpy.random.RandomState(seed=1)\n rng.shuffle(all_ind)\n\n data = AttributeDict()\n\n # Choose the training set\n data.train = train_set\n data.train_ind = all_ind[:unlabeled_samples]\n\n # Then choose validation set from the remaining indices\n data.valid = train_set\n data.valid_ind = numpy.setdiff1d(all_ind, data.train_ind)[:valid_set_size]\n logger.info('Using %d examples for validation' % len(data.valid_ind))\n # Only touch test data if requested\n if test_set:\n data.test = MNIST((\"test\",))\n data.test_ind = numpy.arange(data.test.num_examples)\n\n return data\n", "sub_path": "datasets.py", "file_name": "datasets.py", "file_ext": "py", "file_size_in_byte": 4320, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": 
"logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "fuel.schemes.ShuffledScheme", "line_number": 16, "usage_type": "name"}, {"api_name": "fuel.streams.DataStream", "line_number": 36, "usage_type": "call"}, {"api_name": "fuel.streams.DataStream", "line_number": 39, "usage_type": "call"}, {"api_name": "fuel.transformers.Transformer", "line_number": 45, "usage_type": "name"}, {"api_name": "fuel.transformers.Transformer", "line_number": 59, "usage_type": "name"}, {"api_name": "fuel.transformers.Transformer", "line_number": 61, "usage_type": "argument"}, {"api_name": "picklable_itertools.imap", "line_number": 96, "usage_type": "call"}, {"api_name": "picklable_itertools.cycle", "line_number": 96, "usage_type": "call"}, {"api_name": "fuel.datasets.MNIST", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 111, "usage_type": "attribute"}, {"api_name": "utils.AttributeDict", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.setdiff1d", "line_number": 122, "usage_type": "call"}, {"api_name": "fuel.datasets.MNIST", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "549845306", "text": "\nfrom .base import *\nfrom .base_class import NN_BASE\n\nimport keras.backend as K\nfrom keras.models import load_model\nK.set_image_dim_ordering('th')\n#GOOD\n\n#incept 3\n# dept 1\n# width 20\n#l2 0.0001\n#opt rmsprop\n\n\n#FOR OIL DATASET\n#2x30 Dense\n#Maxnorm 4 for all\n#nbepoch=100\n#batch=64\n\ndef abs(x):\n return K.abs(x)\nclass NCNET_FILE(NN_BASE):\n\n\n\n def __init__(self,model_path):\n\n self.model_name='NCNET_GAS_PRETRAINED_WITH_OLD_DATA'\n\n self.model_path=model_path\n\n\n self.output_layer_activation='linear'\n\n # Training config\n self.optimizer = 'adam'\n self.loss = 'mse'\n self.nb_epoch = 10000\n self.batch_size = 64\n self.verbose = 0\n\n # Input module config\n self.n_depth = 2\n self.n_width = 20\n self.l2weight = 0.0001\n self.add_thresholded_output = True\n\n #Model inputs/ouputs config\n self.input_tags = {}\n\n self.well_names =['F1','B2','D3','E1']\n\n tags = ['CHK','PDC','PWH','PBH']\n\n for name in self.well_names:\n\n self.input_tags[name] = []\n for tag in tags:\n if (name=='C2' or name=='D1') and tag=='PBH':\n pass\n else:\n self.input_tags[name].append(name + '_' + tag)\n\n self.output_tags = {\n 'F1_out': ['F1_QGAS'],\n 'B2_out': ['B2_QGAS'],\n 'D3_out': ['D3_QGAS'],\n 'E1_out': ['E1_QGAS'],\n 'GJOA_QGAS': ['GJOA_QGAS']\n }\n self.loss_weights = {\n 'F1_out': 0.0,\n 'B2_out': 0.0,\n 'D3_out': 0.0,\n 'E1_out': 0.0,\n 'GJOA_QGAS': 1.0\n }\n\n\n super().__init__()\n\n def initialize_model(self):\n print('Initializing %s' % (self.model_name))\n\n self.model = load_model(self.model_path+'.h5')\n\n\n\n\n\n\n", "sub_path": "Models/NeuralNetworks/NN_from_file.py", "file_name": "NN_from_file.py", "file_ext": "py", "file_size_in_byte": 1862, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "keras.backend.set_image_dim_ordering", "line_number": 7, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 7, "usage_type": "name"}, {"api_name": "keras.backend.abs", "line_number": 24, "usage_type": "call"}, {"api_name": 
"keras.backend", "line_number": 24, "usage_type": "name"}, {"api_name": "base_class.NN_BASE", "line_number": 25, "usage_type": "name"}, {"api_name": "keras.models.load_model", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "269230679", "text": "# -*- coding: utf-8 -*-\n# @File : train_net_1024.py\n# @Author : Peizhao Li\n# @Contact : peizhaoli05@gmail.com\n# @Date : 2018/10/24\nimport torch.nn.functional as F\n\nimport os.path as osp\n\nfrom model import net_1024\nfrom utils import *\nfrom Generator_MOT15_Anchors import GeneratorMOT15Anchor\n\n\ndef train(parser, generator, log, log_path):\n # print(\"training net_1024\\n\")\n # model = net_1024.net_1024()\n\n print(\"training final\\n\")\n model = net_1024.net_1024()\n\n # \"----------------- pretrained model loading -----------------\"\n # print(\"loading pretrained model\")\n # checkpoint = torch.load(\"/home/lallazhao/MOT/result/Oct-25-at-02-17-net_1024/net_1024_88.4.pth\")\n # checkpoint = torch.load(\"/hdd/yongxinw/MOT17/experiments/debug1/net_1024.pth\")\n # model.load_state_dict(checkpoint[\"state_dict\"])\n # \"------------------------------------------------------------\"\n\n model = model.cuda()\n net_param_dict = model.parameters()\n\n weight = torch.Tensor([10])\n criterion_BCE = torch.nn.BCEWithLogitsLoss(pos_weight=weight).cuda()\n criterion_CE = torch.nn.CrossEntropyLoss().cuda()\n criterion_MSE = torch.nn.MSELoss().cuda()\n criterion_SMOOTHL1 = torch.nn.SmoothL1Loss().cuda()\n\n if parser.optimizer == \"SGD\":\n optimizer = torch.optim.SGD(net_param_dict, lr=parser.learning_rate,\n momentum=parser.momentum, weight_decay=parser.decay, nesterov=True)\n elif parser.optimizer == \"Adam\":\n optimizer = torch.optim.Adam(net_param_dict, lr=parser.learning_rate, weight_decay=parser.decay)\n elif parser.optimizer == \"RMSprop\":\n optimizer = torch.optim.RMSprop(net_param_dict, lr=parser.learning_rate, weight_decay=parser.decay,\n momentum=parser.momentum)\n else:\n raise NotImplementedError\n\n # Main Training and Evaluation Loop\n start_time, epoch_time = time.time(), AverageMeter()\n\n Batch_time = AverageMeter()\n Loss = AverageMeter()\n CLoss = AverageMeter()\n RLoss = AverageMeter()\n Acc = AverageMeter()\n Acc_pos = AverageMeter()\n\n # Initialize visual validation\n val_parser, val_generator, val_log_path = init_visual_validation()\n\n for epoch in range(parser.start_epoch, parser.epochs):\n all_lrs = adjust_learning_rate(optimizer, epoch, parser.gammas, parser.schedule)\n need_hour, need_mins, need_secs = convert_secs2time(epoch_time.avg * (parser.epochs - epoch))\n\n # ----------------------------------- train for one epoch -----------------------------------\n batch_time, loss, classification_loss, regression_loss, acc, acc_pos = \\\n train_net_1024(model, generator, optimizer, criterion_BCE, criterion_CE, criterion_MSE, criterion_SMOOTHL1)\n\n Batch_time.update(batch_time)\n Loss.update(loss.item())\n CLoss.update(classification_loss.item())\n RLoss.update(regression_loss.item())\n Acc.update(acc)\n Acc_pos.update(acc_pos)\n\n if epoch % parser.print_freq == 0 or epoch == parser.epochs - 1:\n print_log('Epoch: [{:03d}/{:03d}]\\t'\n 'Time {batch_time.val:5.2f} ({batch_time.avg:5.2f})\\t'\n 'Match Loss {loss.val:6.3f} ({loss.avg:6.3f})\\t'\n 'Cls Loss {closs.val:6.3f} ({closs.avg:6.3f})\\t'\n 'Reg Loss {rloss.val:6.3f} ({rloss.avg:6.3f})\\t'\n \"Acc {acc.val:6.3f} ({acc.avg:6.3f})\\t\"\n \"Acc_pos {acc_pos.val:6.3f} ({acc_pos.avg:6.3f})\\t\".format(\n epoch, parser.epochs, batch_time=Batch_time, loss=Loss, 
closs=CLoss, rloss=RLoss,\n acc=Acc, acc_pos=Acc_pos), log)\n\n visual_log(model, epoch, val_parser, val_generator, val_log_path)\n\n Batch_time.reset()\n Loss.reset()\n CLoss.reset()\n RLoss.reset()\n\n\n if (epoch in parser.schedule):\n print_log(\"------------------- adjust learning rate -------------------\", log)\n # -------------------------------------------------------------------------------------------\n\n # measure elapsed time\n epoch_time.update(time.time() - start_time)\n start_time = time.time()\n\n if epoch % 100 == 0:\n save_file_path = osp.join(log_path, \"net_1024.pth\")\n states = {\n \"state_dict\": model.state_dict(),\n }\n torch.save(states, save_file_path)\n # if parser.save_model:\n # save_file_path = osp.join(log_path, \"net_1024.pth\")\n # states = {\n # \"state_dict\": model.state_dict(),\n # }\n # torch.save(states, save_file_path)\n\n\ndef train_net_1024(model, generator, optimizer, criterion_BCE, criterion_CE, criterion_MSE, criterion_SMOOTHL1):\n # switch to train mode\n model.train()\n\n cur_crop, pre_crop, cur_motion, pre_motion, gt_matrix, pos_mask, neg_mask, anchor_class, offsets = generator()\n # print(len(cur_crop), cur_crop[0].shape)\n # print(len(pre_crop), pre_crop[0].shape)\n # print(len(cur_motion), cur_motion[0].shape)\n # print(len(pre_motion), pre_motion[0].shape)\n # print(gt_matrix.shape, type(gt_matrix))\n # exit()\n assert len(cur_crop) == len(cur_motion)\n assert len(pre_crop) == len(pre_motion)\n\n target = torch.from_numpy(gt_matrix).cuda().float().view(-1)\n\n end = time.time()\n\n s0, s1, s2, s3, adj1, adj, box_pred, cls_pred = model(pre_crop, cur_crop, pre_motion, cur_motion)\n loss = criterion_BCE(s0, target)\n loss += criterion_BCE(s1, target)\n loss += criterion_BCE(s2, target)\n loss += criterion_BCE(s3, target)\n # loss += matrix_loss(adj1, gt_matrix, criterion_CE, criterion_MSE)\n # loss += matrix_loss(adj, gt_matrix, criterion_CE, criterion_MSE)\n\n # Post process class predictions (i.e. 
keep pos_mask + neg_mask, and balance pos and negs)\n pos_inds = np.where(pos_mask)[0]\n neg_inds = np.where(neg_mask)[0]\n # randomly sample twice as many negative indices as positive indices\n rand_negs_subsample_inds = np.random.choice(neg_inds.shape[0], size=pos_inds.shape[0]*2, replace=False)\n keep_inds = list(pos_inds) + list(neg_inds[rand_negs_subsample_inds])\n # print(anchor_class[keep_inds])\n # exit()\n\n classification_loss = criterion_CE(cls_pred[keep_inds], anchor_class[keep_inds].cuda()) * 10\n regression_loss = criterion_SMOOTHL1(box_pred[np.where(pos_mask)], offsets[np.where(pos_mask)].cuda()) * 100\n\n # add classification and regression loss\n loss += classification_loss\n loss += regression_loss\n # s0, s3, adj = model(pre_crop, cur_crop)\n # loss = criterion_BCE(s0, target)\n # loss = criterion_BCE(s3, target)\n # loss += matrix_loss(adj1, gt_matrix, criterion_CE, criterion_MSE)\n # loss += matrix_loss(adj, gt_matrix, criterion_CE, criterion_MSE)\n\n # acc, acc_pos = accuracy(s3.clone(), target.clone())\n acc, acc_pos = accuracy2(adj.clone(), target)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n batch_time = time.time() - end\n\n return batch_time, loss, classification_loss, regression_loss, acc, acc_pos\n\n\ndef init_visual_validation():\n val_config = osp.join(os.path.abspath(os.curdir), \"val_config.yml\")\n val_parser, _ = Config(val_config)\n val_generator = GeneratorMOT15Anchor(parser=val_parser, entirety=val_parser.entirety, val=True)\n val_log_path = osp.join(val_parser.result, 'train_mot15_w_detect_3anchors')\n os.makedirs(val_log_path, exist_ok=True)\n return val_parser, val_generator, val_log_path\n\n\ndef visual_log(model, epoch, val_parser, val_generator, val_log_path):\n # define validation information\n\n model.eval()\n image_dir = osp.join(val_log_path, \"images\")\n os.makedirs(image_dir, exist_ok=True)\n\n for it in range(1):\n frame = 6\n cur_crop, pre_crop, cur_motion, pre_motion, gt_matrix, pos_mask, neg_mask, anchor_class, offsets, curr_image, \\\n curr_data, prev_image, prev_data, anchors = val_generator(frame=frame)\n\n with torch.no_grad():\n s0, s1, s2, s3, adj1, adj, box_pred, cls_pred = model(pre_crop, cur_crop, pre_motion, cur_motion)\n\n # predicted matching score\n adj_sig = torch.sigmoid(adj.detach().cpu())\n\n if val_parser.use_gt_match:\n adj_sig = torch.from_numpy(gt_matrix)\n\n # use top k adj scores for match\n scores, match_idx = torch.topk(adj_sig.t(), 1, dim=1)\n\n # mask the indices that are below the threshold\n match_idx[scores < val_parser.threshold] = -1\n\n # x_inds = torch.arange(match_idx.shape[0]).view(-1, 1).repeat(1, match_idx.shape[1]).view(-1)\n prev_boxes = prev_data[:, 2:6]\n gt_ids = prev_data[:, 1]\n max_color = 10\n colors = random_colors(max_color, bright=True)\n\n # Visualize gt boxes\n curr_image_copy = prev_image.copy()\n for i, gt_box in enumerate(prev_boxes):\n id = gt_ids[i]\n color_tmp = tuple([int(tmp * 255) for tmp in colors[int(id % max_color)]])\n curr_image_copy = visualize_boxes(curr_image_copy, [gt_box], width=5, outline=color_tmp)\n\n curr_image_copy.save(osp.join(image_dir, \"prev_{:03d}.jpg\".format(frame + 1)))\n\n # Visualize anchor detection+classification\n # print(cls_pred)\n # print(anchor_class)\n # print(pos_mask)\n\n # Visualize detections\n curr_boxes = curr_data[:, 2:6]\n\n curr_image_copy = curr_image.copy()\n\n colors = random_colors(max_color, bright=False)\n # Draw negative anchors\n for j, anchor_box in enumerate(anchors):\n # predicted class\n cls_j = 
np.argmax(cls_pred.detach().cpu().numpy()[j])\n\n # if we are in debug mode and want to use some gt information, specify in config\n if val_parser.use_gt_anchor_class:\n cls_j = anchor_class[j]\n\n if cls_j == 0:\n curr_image_copy = visualize_boxes(curr_image_copy, [anchor_box], width=1, outline='white')\n\n # Draw positive anchors\n for j, anchor_box in enumerate(anchors):\n # predicted class\n cls_j = np.argmax(cls_pred.detach().cpu().numpy()[j])\n\n # predicted offset\n offset_j = box_pred.detach().cpu().numpy()[j]\n\n # if we are in debug mode and want to use some gt information, specify in config\n if val_parser.use_gt_anchor_class:\n cls_j = anchor_class[j]\n if val_parser.use_gt_offsets:\n offset_j = offsets[j]\n if cls_j == 1:\n match = match_idx[j]\n match_gt_id = gt_ids[match]\n outline = tuple([int(tmp * 255) for tmp in colors[int(match_gt_id % max_color)]])\n if val_parser.show_aligned_anchors:\n gw, gh = np.exp(offset_j[2:]) * anchor_box[2:]\n gleft, gtop = offset_j[:2] * anchor_box[2:] + anchor_box[:2]\n anchor_box_aligned = [gleft, gtop, gw, gh]\n curr_image_copy = visualize_boxes(curr_image_copy, [anchor_box_aligned], width=3, outline=outline)\n else:\n curr_image_copy = visualize_boxes(curr_image_copy, [anchor_box], width=3, outline=outline)\n\n # visualize the GT\n for i, gt_box in enumerate(curr_boxes):\n id = gt_ids[i]\n color_tmp = tuple([int(tmp * 255) for tmp in colors[int(id % max_color)]])\n curr_image_copy = visualize_boxes(curr_image_copy, [gt_box], width=5, outline=color_tmp)\n curr_image_copy.save(osp.join(image_dir, \"curr_det_{:03d}_ep{:05d}.jpg\".format(frame + 1, epoch)))\n model.train()\n", "sub_path": "train/train_net_1024_detection.py", "file_name": "train_net_1024_detection.py", "file_ext": "py", "file_size_in_byte": 11687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "model.net_1024.net_1024", "line_number": 20, "usage_type": "call"}, {"api_name": "model.net_1024", "line_number": 20, "usage_type": "name"}, {"api_name": "model.cuda", "line_number": 29, "usage_type": "call"}, {"api_name": "model.parameters", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.functional.Tensor", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.functional.nn.BCEWithLogitsLoss", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn.functional.nn", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.functional.nn.CrossEntropyLoss", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.functional.nn", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.functional.nn.MSELoss", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn.functional.nn", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.functional.nn.SmoothL1Loss", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn.functional.nn", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.functional.optim.SGD", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn.functional.optim", "line_number": 39, 
"usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.functional.optim.Adam", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn.functional.optim", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.functional.optim.RMSprop", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.functional.optim", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 44, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "name"}, {"api_name": "model.state_dict", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn.functional.save", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 109, "usage_type": "name"}, {"api_name": "model.train", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn.functional.from_numpy", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 132, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "name"}, {"api_name": "os.path.path.abspath", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.path.curdir", "line_number": 178, "usage_type": "attribute"}, {"api_name": "Generator_MOT15_Anchors.GeneratorMOT15Anchor", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "name"}, {"api_name": "os.path.makedirs", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "name"}, {"api_name": "model.eval", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "name"}, {"api_name": "os.path.makedirs", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "name"}, {"api_name": "torch.nn.functional.no_grad", "line_number": 198, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 198, "usage_type": "name"}, {"api_name": "torch.nn.functional.sigmoid", "line_number": 202, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 202, "usage_type": "name"}, {"api_name": "torch.nn.functional.from_numpy", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 205, "usage_type": "name"}, {"api_name": "torch.nn.functional.topk", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 208, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path", "line_number": 226, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path", "line_number": 281, "usage_type": "name"}, {"api_name": "model.train", "line_number": 282, "usage_type": "call"}]} +{"seq_id": "141318967", "text": "#!/usr/bin/env python3\n\n# ----------------------------------------------------------\n\nimport csv\nimport itertools\nimport operator\n\nimport 
collections\nimport random\n\n# ----------------------------------------------------------\n#\n# Read from the CSV file and save the info to be acted upon\n# \n# ----------------------------------------------------------\n\nopen_csv = open('data.csv', 'r')\ndata_dump = csv.reader(open_csv, delimiter = \";\")\n\nbig_group = []\n\nfor row in data_dump:\n big_group.append(row)\n\n# ----------------------------------------------------------\n# \n# A couple of different approaches to generating the groups\n# \n# 1. sorting the big_group in a variety of ways, then picking four at a time\n# 2. finding the most frequently occuring attribute we care about, and \n# building groups on each of them.\n# 3.\n# 4. \n#\n# ----------------------------------------------------------\n\n# set the group size for lunches\ngroup_size = 4\ngroup_count = len(big_group) // group_size\n\n\n# sort by the attribute we care about (job)\n# random.shuffle(big_group)\n\n\n# create job pools to build groups from\nProgram_Manager \t\t\t= []\nRnD_Manager\t\t\t\t\t= []\nPrincipal_Software_Engineer = []\nProduct_Manager\t\t\t\t= []\nEngineer \t\t\t\t\t= []\nHR_Consultant \t\t\t\t= []\nDirector_Business_Solutions = []\nVisual_Designer \t\t\t= []\nMgr_Program_Management \t\t= []\nSoftware_Engineer \t\t\t= []\nMgr_Quality_Improvement \t= []\nUX_Designer \t\t\t\t= []\nPrincipal_Architect \t\t= []\nEmployee_Comms_Specialist \t= []\nCartography_Designer \t\t= []\nQA_engineer \t\t\t\t= []\n\n\nfor each_person in big_group:\n\n\tjob = each_person[3]\n\n\tif \"Program Manager\" in job:\n\t\tProgram_Manager.append(each_person)\n\telif \"R&D Manager\" in job:\n\t\tRnD_Manager.append(each_person)\n\telif \"Principal Software Engineer\" in job:\n\t\tPrincipal_Software_Engineer.append(each_person)\n\telif \"Product Manager\" in job:\n\t\tProduct_Manager.append(each_person)\n\telif \"Software Engineer\" in job:\n\t\tSoftware_Engineer.append(each_person)\n\telif \"QA engineer\" in job:\n\t\tQA_engineer.append(each_person)\n\telif \"Engineer\" in job:\n\t\tEngineer.append(each_person)\n\telif \"HR Consultant\" in job:\n\t\tHR_Consultant.append(each_person)\n\telif \"Director Business Solutions\" in job:\n\t\tDirector_Business_Solutions.append(each_person)\n\telif \"Visual Designer\" in job:\n\t\tVisual_Designer.append(each_person)\n\telif \"Mgr Program Management\" in job:\n\t\tMgr_Program_Management.append(each_person)\n\telif \"Mgr Quality Improvement\" in job:\n\t\tMgr_Quality_Improvement.append(each_person)\n\telif \"UX Designer\" in job:\n\t\tUX_Designer.append(each_person)\n\telif \"Principal Architect\" in job:\n\t\tPrincipal_Architect.append(each_person)\n\telif \"Employee Comms Specialist\" in job:\n\t\tEmployee_Comms_Specialist.append(each_person)\n\telif \"Cartography Designer\" in job:\n\t\tCartography_Designer.append(each_person)\n\telse:\n\t\tpass\n\n# print(\"\\n\\nSorting large group into smaller pools based on job_title... 
\\n\")\n\nlarge_group = [Program_Manager,\n\t\t\t\tRnD_Manager,\n\t\t\t\tPrincipal_Software_Engineer,\n\t\t\t\tProduct_Manager,\n\t\t\t\tEngineer,\n\t\t\t\tHR_Consultant,\n\t\t\t\tDirector_Business_Solutions,\n\t\t\t\tVisual_Designer,\n\t\t\t\tMgr_Program_Management,\n\t\t\t\tSoftware_Engineer,\n\t\t\t\tMgr_Quality_Improvement,\n\t\t\t\tUX_Designer,\n\t\t\t\tPrincipal_Architect,\n\t\t\t\tEmployee_Comms_Specialist,\n\t\t\t\tCartography_Designer,\n\t\t\t\tQA_engineer\n\t\t\t\t]\n\n\ndef weighting(given_collection_of_job_groups):\n\n\ttotal_people = 0\n\t\n\tfor each_job_group in given_collection_of_job_groups:\n\t\ttotal_people += len(each_job_group)\n\n\tcount = 1\n\tweights = []\n\n\tfor each_job_group in given_collection_of_job_groups:\n\t\tif len(each_job_group) > 0:\n\t\t\tweights.append(.01*(int(len(each_job_group)/total_people*100)))\n\t\t\tcount += 1\n\t\telse:\n\t\t\tpass\n\n\t#return {total_people : weights}\n\treturn weights\n\ntext_group = ['Program_Manager',\n\t\t\t\t'RnD_Manager',\n\t\t\t\t'Principal_Software_Engineer',\n\t\t\t\t'Product_Manager',\n\t\t\t\t'Engineer',\n\t\t\t\t'HR_Consultant',\n\t\t\t\t'Director_Business_Solutions',\n\t\t\t\t'Visual_Designer',\n\t\t\t\t'Mgr_Program_Management',\n\t\t\t\t'Software_Engineer',\n\t\t\t\t'Mgr_Quality_Improvement',\n\t\t\t\t'UX_Designer',\n\t\t\t\t'Principal_Architect',\n\t\t\t\t'Employee_Comms_Specialist',\n\t\t\t\t'Cartography_Designer',\n\t\t\t\t'QA_engineer'\n\t\t\t\t]\n\n# for x in weighting(large_group):\n# \tprint(x)\nfor x in large_group:\n\t#asdfdasdf[]\n\tprint(\"\\njob group: \")\n\tfor z in x:\n\t\tprint(z)\n\n#-------------------------------------------------------\n# make a sampling of the weighted selection\n\nmillionsamples = random.choices(text_group, weighting(large_group), k=10**6)\ntotes = collections.Counter(millionsamples)\nprint(\"\\n-----------------------------------------------\\n# \" +\n\t\"Expected Distribution based on Sample Sizes\\n-----------------------------------------------\")\nprint(\"\\n ____________________________\\n| Sample Size of One Million |\\n\")\nfor x in totes:\n\tprint(x, \":\", totes[x])\n\n\nthousandsamples = random.choices(text_group, weighting(large_group), k=1000)\ntotes = collections.Counter(thousandsamples)\nprint(\"\\n _____________________________\\n| Sample Size of One Thousand |\\n\")\nfor x in totes:\n\tprint(x, \":\", totes[x])\n\n\nafewsamples = random.choices(text_group, weighting(large_group), k=25)\ntotes = collections.Counter(afewsamples)\nprint(\"\\n ____________________________\\n| Sample Size of Twenty Five |\\n\")\nfor x in totes:\n\tprint(x, \":\", totes[x])\n\n#-------------------------------------------------------\n# make a list of lists for the lunch groups to populate\n\nlist_of_lunch_groups = []\n\nfor x in range(len(big_group) // group_size):\n\tlist_of_lunch_groups.append([])\n\n#-------------------------------------------------------\n# populate each of the lunch groups using the random\n# selection method, applying the weighted distributions\n\nfor z in range(group_size):\n\tfor each_lunch_group in list_of_lunch_groups:\n\t\tgotchya = random.choices(large_group, weighting(large_group))[0]\n\t\t#pickone = random.randint(1,len(gotchya))\n\t\tnowwhat = gotchya.pop(0)\n\t\teach_lunch_group.append(nowwhat)\n\t\t# remove empty groups from the large_group list\n\t\tlarge_group2 = [x for x in large_group if x != []]\n\t\tlarge_group = large_group2\n\n\n#-------------------------------------------------------\n# print the groups to eyeball for 
errors\n\nprint(\"\\n-------------------------------------------\\n# \" + \n\t\"Lunch Groups\\n-------------------------------------------\\n\")\n\nfor x in large_group:\n\tremainer = x.pop(0)\n\tlist_of_lunch_groups[0].append(remainer)\n\nfor x in list_of_lunch_groups:\n\tfor z in x:\n\t\tprint(z)\n\tprint(\"\\n\")\n\n#-------------------------------------------------------\n\n\n'''\n# create a data object of each person\nclass Lunchee(person_object):\n\t\"\"\"docstring for Lunchee\"\"\"\n\tdef __init__(self, uid, name, email, job_title, location):\n\t\tsuper(Lunchee, self).__init__()\n\t\tself.uid \t\t= uid \n\t\tself.name \t\t= name\n\t\tself.email \t\t= email\n\t\tself.job_title \t= job_title\n\t\tself.location \t= location\n\n\tdef __hash__(self):\n\t\treturn hash((self.uid, self.name, self.email, self.job_title, self.location))\n\n\tdef __eq__(self, other):\n\t\tif not isinstance(other, type(self)): return NotImplemented\n\t\treturn self.uid == other.uid and self.name == other.name and self.email == other.email and self.job_title == other.job_title and self.location == other.location\n'''\n\n\n\n\n# ----------------------------------------------------------\n#\n# Still to do...\n#\n# ----------------------------------------------------------\n\n# compare_to_previous_groups_in_dynamodb(new_lunch_groups) //negated by checking for dupes by class\n# if groups_require_it_reshuffle_groups(new_lunch_groups) \n# trigger_save_group_to_dynamobd(new_lunch_groups)\n# \n\ndef copy_lunchees_to_attendee_database(self, lunchees):\n # check to see if user is in database already\n # add userdata to database\n pass\n\ndef send_out_meeting_point_emails_to_each_group(self, groups_list):\n for each_group in groups_list:\n # create email from template\n # assign a pre-selected meeting point from lists\n # extract 4 email addresses from the group data\n # target the email addresses\n pass\n", "sub_path": "group_maker.py", "file_name": "group_maker.py", "file_ext": "py", "file_size_in_byte": 7902, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "csv.reader", "line_number": 19, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 175, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 176, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 184, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 185, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 191, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 192, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 211, "usage_type": "call"}]} +{"seq_id": "182810353", "text": "# Copyright 2019 Pascal Audet\n#\n# This file is part of RfPy.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 
OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nFunctions to calculate piercing points from a velocity model and slowness\nvalues, bin them, and produce CCP stacks using receiver functions.\n\n\"\"\"\n\nimport numpy as np\nimport scipy as sp\nfrom scipy.signal import hilbert\nfrom rfpy import binning\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\n\nclass CCPimage(object):\n\n def __init__(self, coord_start=[None, None], coord_end=[None, None],\n weights=[0.5, 3., -3.],\n dep=np.array([0., 4., 8., 14., 30., 35., 45., 110.]),\n vs=np.array([4.0, 5.9, 6.2, 6.3, 6.8, 7.2, 8.0, 8.1]),\n vpvs=1.73):\n\n self.radialRF = []\n self.dep = dep\n self.vs = vs\n self.vp = vs*vpvs\n self.weights = weights\n self.xs_lat1 = coord_start[0]\n self.xs_lon1 = coord_start[1]\n self.xs_lat2 = coord_end[0]\n self.xs_lon2 = coord_end[1]\n\n def add_rfstream(self, rfstream):\n\n self.radialRF.append(rfstream)\n\n def prep_data(self, f1, f2ps, f2pps, f2pss, n_depth=120):\n i_key = 0  # counts processed RF streams; used to switch between init and concatenate below\n # Process streams one at a time\n for RF in self.radialRF:\n\n # Bin RFs into back-azimuth and slowness bins to speed up\n # calculations\n RFbin = binning.bin_baz_slow(\n RF, nbaz=36+1, nslow=40+1, pws=True)[0]\n n_traces = len(RFbin)\n amp_ps_tr = np.empty([n_traces, n_depth])\n amp_pps_tr = np.empty([n_traces, n_depth])\n amp_pss_tr = np.empty([n_traces, n_depth])\n lon_tr = np.empty([n_traces, n_depth])\n lat_tr = np.empty([n_traces, n_depth])\n\n st_ps = RFbin.copy()\n st_pps = RFbin.copy()\n st_pss = RFbin.copy()\n\n # Filter Ps, Pps and Pss\n st_ps.filter(\n 'bandpass', freqmin=f1, freqmax=f2ps,\n corners=4, zerophase=True)\n st_pps.filter(\n 'bandpass', freqmin=f1, freqmax=f2pps,\n corners=4, zerophase=True)\n st_pss.filter(\n 'bandpass', freqmin=f1, freqmax=f2pss,\n corners=4, zerophase=True)\n del RFbin\n\n for itr in range(len(st_ps)):\n\n print('tr ', itr+1, ' out of ', len(st_ps))\n\n # Get raypath and travel time for all phases\n tt_ps, tt_pps, tt_pss, plon, plat, idep = \\\n raypath(st_ps[itr], nz=n_depth,\n dep=self.dep, vp=self.vp, vs=self.vs)\n\n # Now get amplitude of RF at corresponding travel\n # time along the raypath\n depth_array = np.asarray(idep)\n lon_tr[itr, :] = plon\n lat_tr[itr, :] = plat\n\n amp_ps = []\n amp_pps = []\n amp_pss = []\n\n # Loop through travel times and shift RFs to get amplitudes\n for tt in tt_ps:\n a, phase = timeshift(st_ps[itr], tt)\n amp_ps.append(self.weights[0]*a)\n amp_ps_tr[itr, :] = amp_ps\n\n # Loop through travel times and shift RFs to get amplitudes\n for tt in tt_pps:\n a, phase = timeshift(st_pps[itr], tt)\n amp_pps.append(self.weights[1]*a)\n amp_pps_tr[itr, :] = amp_pps\n\n # Loop through travel times and shift RFs to get amplitudes\n for tt in tt_pss:\n a, phase = timeshift(st_pss[itr], tt)\n amp_pss.append(self.weights[2]*a)\n amp_pss_tr[itr, :] = amp_pss\n\n if i_key == 0:\n amp_ps_depth = amp_ps_tr.transpose()\n amp_pps_depth = amp_pps_tr.transpose()\n amp_pss_depth = amp_pss_tr.transpose()\n lon_depth = lon_tr.transpose()\n lat_depth = lat_tr.transpose()\n\n elif i_key > 0:\n amp_ps_depth = np.concatenate(\n (amp_ps_depth, amp_ps_tr.transpose()), axis=1)\n amp_pps_depth = np.concatenate(\n (amp_pps_depth, amp_pps_tr.transpose()), axis=1)\n amp_pss_depth = 
np.concatenate(\n (amp_pss_depth, amp_pss_tr.transpose()), axis=1)\n lon_depth = np.concatenate(\n (lon_depth, lon_tr.transpose()), axis=1)\n lat_depth = np.concatenate(\n (lat_depth, lat_tr.transpose()), axis=1)\n\n i_key += 1\n\n self.amp_ps_depth = amp_ps_depth\n self.amp_pps_depth = amp_pps_depth\n self.amp_pss_depth = amp_pss_depth\n self.lon_depth = lon_depth\n self.lat_depth = lat_depth\n self.depth_array = depth_array\n\n del self.radialRF\n\n def prestack(self, cell_length=1.):\n\n (n_depth, n_traces) = self.lon_depth.shape\n\n # Specify coordinates of cross-section end points\n earth_radius = 6371 # kilometres\n\n # Get total length of grid from end points\n xs_length = haversine(self.xs_lat1, self.xs_lon1,\n self.xs_lat2, self.xs_lon2)\n\n # number of cells laterally for specified cell_length (rounded)\n n_lateral = int(np.rint(xs_length/cell_length))\n\n xs_latitudes = np.asarray(np.linspace(self.xs_lat1, self.xs_lat2, n_lateral))\n xs_longitudes = np.asarray(np.linspace(self.xs_lon1, self.xs_lon2, n_lateral))\n lateral_distances = np.arange(n_lateral)*cell_length\n\n xs_amps_ps = np.zeros((n_depth, n_lateral, n_traces))\n xs_amps_pps = np.zeros((n_depth, n_lateral, n_traces))\n xs_amps_pss = np.zeros((n_depth, n_lateral, n_traces))\n\n for i_depth in range(n_depth):\n print('i_depth for Grid loop', i_depth+1, ' out of ', n_depth)\n\n for i_coor in range(n_traces):\n\n lat_tr = self.lat_depth[i_depth, i_coor]\n lon_tr = self.lon_depth[i_depth, i_coor]\n distance_tests = np.empty(n_lateral)\n\n for i_xs in range(n_lateral):\n lat_xs = xs_latitudes[i_xs]\n lon_xs = xs_longitudes[i_xs]\n distance_tests[i_xs] = haversine(\n lat_xs, lon_xs, lat_tr, lon_tr)\n\n minimum_distance = np.amin(distance_tests)\n i_cell = np.where(distance_tests ==\n np.amin(distance_tests))[0][0]\n\n nonzero_count = np.count_nonzero(\n xs_amps_ps[i_depth, i_cell, :])\n new_amp_ps = self.amp_ps_depth[i_depth, i_coor]\n if xs_amps_ps[i_depth, i_cell, 0] == 0.:\n xs_amps_ps[i_depth, i_cell, 0] = new_amp_ps\n else:\n xs_amps_ps[i_depth, i_cell, nonzero_count] = new_amp_ps\n\n nonzero_count = np.count_nonzero(\n xs_amps_pps[i_depth, i_cell, :])\n new_amp_pps = self.amp_pps_depth[i_depth, i_coor]\n if xs_amps_pps[i_depth, i_cell, 0] == 0.:\n xs_amps_pps[i_depth, i_cell, 0] = new_amp_pps\n else:\n xs_amps_pps[i_depth, i_cell, nonzero_count] = new_amp_pps\n\n nonzero_count = np.count_nonzero(\n xs_amps_pss[i_depth, i_cell, :])\n new_amp_pss = self.amp_pss_depth[i_depth, i_coor]\n if xs_amps_pss[i_depth, i_cell, 0] == 0.:\n xs_amps_pss[i_depth, i_cell, 0] = new_amp_pss\n else:\n xs_amps_pss[i_depth, i_cell, nonzero_count] = new_amp_pss\n\n self.xs_amps_ps = xs_amps_ps\n self.xs_amps_pps = xs_amps_pps\n self.xs_amps_pss = xs_amps_pss\n self.lateral_distances = lateral_distances\n self.n_lateral = n_lateral\n self.n_depth = n_depth\n\n def ccp(self):\n\n xs_ps_avg = np.zeros((self.n_depth, self.n_lateral))\n xs_pps_avg = np.zeros((self.n_depth, self.n_lateral))\n xs_pss_avg = np.zeros((self.n_depth, self.n_lateral))\n\n for i_depth in range(self.n_depth):\n print('i_depth for Average loop',\n i_depth, ' out of ', self.n_depth)\n\n for i_cell in range(self.n_lateral):\n\n nonzero_count = np.count_nonzero(\n self.xs_amps_ps[i_depth, i_cell, :])\n if nonzero_count != 0:\n amps_ps = self.xs_amps_ps[i_depth, i_cell, 0:nonzero_count]\n xs_ps_avg[i_depth, i_cell] = np.mean(amps_ps)\n\n nonzero_count = np.count_nonzero(\n self.xs_amps_pps[i_depth, i_cell, :])\n if nonzero_count != 0:\n amps_pps = self.xs_amps_pps[i_depth,\n i_cell, 0:nonzero_count]\n 
xs_pps_avg[i_depth, i_cell] = np.mean(amps_pps)\n\n nonzero_count = np.count_nonzero(\n self.xs_amps_pss[i_depth, i_cell, :])\n if nonzero_count != 0:\n amps_pss = self.xs_amps_pss[i_depth,\n i_cell, 0:nonzero_count]\n xs_pss_avg[i_depth, i_cell] = np.mean(amps_pss)\n\n self.xs_ps_avg = xs_ps_avg\n self.xs_pps_avg = xs_pps_avg\n self.xs_pss_avg = xs_pss_avg\n\n def gccp(self, wlen=15.):\n\n dlat = max(self.lateral_distances)/self.n_lateral\n\n import scipy.ndimage as ndimage\n\n self.xs_gauss_ps = ndimage.filters.gaussian_filter(\n self.xs_ps_avg, sigma=(0, wlen/dlat))\n self.xs_gauss_pps = ndimage.filters.gaussian_filter(\n self.xs_pps_avg, sigma=(0, wlen/dlat))\n self.xs_gauss_pss = ndimage.filters.gaussian_filter(\n self.xs_pss_avg, sigma=(0, wlen/dlat))\n\n def stack_ccp(self):\n\n tot_trace = np.zeros((self.n_depth, self.n_lateral))\n\n for i_cell in range(self.n_lateral):\n ps_trace = self.xs_ps_avg[:, i_cell]\n pps_trace = self.xs_pps_avg[:, i_cell]\n pss_trace = self.xs_pss_avg[:, i_cell]\n tot_trace[:, i_cell] = (ps_trace + pps_trace + pss_trace)\n\n self.tot_trace_ccp = tot_trace\n\n def pws_gccp(self):\n\n tot_trace = np.zeros((self.n_depth, self.n_lateral))\n\n for i_cell in range(self.n_lateral):\n ps_trace = self.xs_gauss_ps[:, i_cell]\n pps_trace = self.xs_gauss_pps[:, i_cell]\n pss_trace = self.xs_gauss_pss[:, i_cell]\n\n weight = np.zeros(len(ps_trace), dtype=complex)\n ps_hilb = hilbert(ps_trace)\n ps_phase = np.arctan2(ps_hilb.imag, ps_hilb.real)\n weight += np.exp(1j*ps_phase)\n\n pps_hilb = hilbert(pps_trace)\n pps_phase = np.arctan2(pps_hilb.imag, pps_hilb.real)\n weight += np.exp(1j*pps_phase)\n\n pss_hilb = hilbert(pss_trace)\n pss_phase = np.arctan2(pss_hilb.imag, pss_hilb.real)\n weight += np.exp(1j*pss_phase)\n\n weight = np.abs(weight)/3.\n\n tot_trace[:, i_cell] = (ps_trace + pps_trace + pss_trace)*weight**2\n\n self.tot_trace_gccp = tot_trace\n\n def plot_ccp(self, vmin=-0.015, vmax=0.015, save=False, form='png'):\n\n fig, (ax1, ax2, ax3, ax4) = plt.subplots(\n 4, 1, figsize=(8.5, 8))\n\n # plt.pcolormesh(lateral_distances,depth_array,xs_ps_avg,cmap=cm.coolwarm,vmin=vmin,vmax=vmax)\n ax1.pcolormesh(self.lateral_distances, self.depth_array,\n self.xs_ps_avg, cmap=cm.RdBu_r, vmin=vmin, vmax=vmax)\n bar = plt.colorbar()\n ax1.set_xlim((min(self.lateral_distances)),\n (max(self.lateral_distances)))\n ax1.set_ylim((min(self.depth_array)), (max(self.depth_array)))\n bar.ax.set_ylabel('Amplitude', size=10)\n # ax1.set_xlabel('Lateral Distance (km)', size=10)\n ax1.set_ylabel('Depth (km)', size=10)\n ax1.set_title('Ps CCP image', size=10)\n ax1.invert_yaxis()\n\n # plt.pcolormesh(lateral_distances,depth_array,xs_pps_avg,cmap=cm.coolwarm,vmin=vmin,vmax=vmax)\n ax2.pcolormesh(self.lateral_distances, self.depth_array,\n self.xs_pps_avg, cmap=cm.RdBu_r, vmin=vmin, vmax=vmax)\n bar = plt.colorbar()\n ax2.set_xlim((min(self.lateral_distances)),\n (max(self.lateral_distances)))\n ax2.set_ylim((min(self.depth_array)), (max(self.depth_array)))\n bar.ax.set_ylabel('Amplitude', size=10)\n # ax2.set_xlabel('Lateral Distance (km)', size=10)\n ax2.set_ylabel('Depth (km)', size=10)\n ax2.set_title('Pps CCP image', size=10)\n ax2.invert_yaxis()\n\n ax3.pcolormesh(self.lateral_distances, self.depth_array,\n self.xs_pss_avg, cmap=cm.RdBu_r, vmin=vmin, vmax=vmax)\n bar = plt.colorbar()\n ax3.set_xlim((min(self.lateral_distances)),\n (max(self.lateral_distances)))\n ax3.set_ylim((min(self.depth_array)), (max(self.depth_array)))\n bar.ax.set_ylabel('Amplitude', size=10)\n # ax3.set_xlabel('Lateral 
Distance (km)', size=10)\n ax3.set_ylabel('Depth (km)', size=10)\n ax3.set_title('Pss CCP image', size=10)\n ax3.invert_yaxis()\n\n ax4.pcolormesh(self.lateral_distances, self.depth_array,\n self.tot_trace_ccp, cmap=cm.RdBu_r, vmin=vmin, vmax=vmax)\n bar = plt.colorbar()\n ax4.set_xlim((min(self.lateral_distances)),\n (max(self.lateral_distances)))\n ax4.set_ylim((min(self.depth_array)), (max(self.depth_array)))\n bar.ax.set_ylabel('Amplitude', size=10)\n ax4.set_xlabel('Lateral Distance (km)', size=10)\n ax4.set_ylabel('Depth (km)', size=10)\n ax4.set_title('Weighted CCP image', size=10)\n ax4.invert_yaxis()\n\n if save:\n plt.savefig('FIGURES/ccp.' + form)\n\n plt.show()\n\n def plot_gccp(self, vmin=-0.015, vmax=0.015, save=False, form='png'):\n\n fig, (ax1, ax2, ax3, ax4) = plt.subplots(\n 4, 1, figsize=(8.5, 8))\n\n # plt.pcolormesh(lateral_distances,depth_array,xs_ps_avg,cmap=cm.coolwarm,vmin=vmin,vmax=vmax)\n ax1.pcolormesh(self.lateral_distances, self.depth_array,\n self.xs_gauss_ps, cmap=cm.RdBu_r, vmin=vmin, vmax=vmax)\n bar = plt.colorbar()\n ax1.set_xlim((min(self.lateral_distances)),\n (max(self.lateral_distances)))\n ax1.set_ylim((min(self.depth_array)), (max(self.depth_array)))\n bar.ax.set_ylabel('Amplitude', size=10)\n # ax1.set_xlabel('Lateral Distance (km)', size=10)\n ax1.set_ylabel('Depth (km)', size=10)\n ax1.set_title('Ps GCCP image', size=10)\n ax1.invert_yaxis()\n\n # plt.pcolormesh(lateral_distances,depth_array,xs_pps_avg,cmap=cm.coolwarm,vmin=vmin,vmax=vmax)\n ax2.pcolormesh(self.lateral_distances, self.depth_array,\n self.xs_gauss_pps, cmap=cm.RdBu_r, vmin=vmin, vmax=vmax)\n bar = plt.colorbar()\n ax2.set_xlim((min(self.lateral_distances)),\n (max(self.lateral_distances)))\n ax2.set_ylim((min(self.depth_array)), (max(self.depth_array)))\n bar.ax.set_ylabel('Amplitude', size=10)\n # ax2.set_xlabel('Lateral Distance (km)', size=10)\n ax2.set_ylabel('Depth (km)', size=10)\n ax2.set_title('Pps GCCP image', size=10)\n ax2.invert_yaxis()\n\n ax3.pcolormesh(self.lateral_distances, self.depth_array,\n self.xs_gauss_pss, cmap=cm.RdBu_r, vmin=vmin, vmax=vmax)\n bar = plt.colorbar()\n ax3.set_xlim((min(self.lateral_distances)),\n (max(self.lateral_distances)))\n ax3.set_ylim((min(self.depth_array)), (max(self.depth_array)))\n bar.ax.set_ylabel('Amplitude', size=10)\n # ax3.set_xlabel('Lateral Distance (km)', size=10)\n ax3.set_ylabel('Depth (km)', size=10)\n ax3.set_title('Pss GCCP image', size=10)\n ax3.invert_yaxis()\n\n ax4.pcolormesh(self.lateral_distances, self.depth_array,\n self.tot_trace_gccp, cmap=cm.RdBu_r, vmin=vmin, vmax=vmax)\n bar = plt.colorbar()\n ax4.set_xlim((min(self.lateral_distances)),\n (max(self.lateral_distances)))\n ax4.set_ylim((min(self.depth_array)), (max(self.depth_array)))\n bar.ax.set_ylabel('Amplitude', size=10)\n ax4.set_xlabel('Lateral Distance (km)', size=10)\n ax4.set_ylabel('Depth (km)', size=10)\n ax4.set_title('Phase-weighted GCCP image', size=10)\n ax4.invert_yaxis()\n\n if save:\n plt.savefig('FIGURES/gccp.' 
+ form)\n\n plt.show()\n\n\ndef ppoint_distance(tr, dz, vs):\n \"\"\"\n Calculate horizontal distance for interval dz and velocity vs\n\n Parameters\n ----------\n tr : :class:`~obspy.core.Trace`\n Single trace object to migrate to depth\n dz : float\n Vertical sampling distance\n vs : float\n S-wave velocity (km/s)\n\n \"\"\"\n slow = tr.stats.slow  # horizontal slowness of the trace\n # Calculate distance\n dx = dz*np.tan(np.arcsin(slow*vs))\n\n return dx\n\n\ndef ppoint(tr, dist):\n \"\"\"\n Determine geographic location of piercing point\n\n Parameters\n ----------\n tr : :class:`~obspy.core.Trace`\n Single trace object to migrate to depth\n dist : float\n Horizontal distance from the station (km)\n\n \"\"\"\n\n # Conversion factors\n lat2km = 111.\n lon2km = 90.\n\n # Get lat and lon of station location\n slat = tr.stats.stla\n slon = tr.stats.stlo\n\n # Back-azimuth of event\n baz = tr.stats.baz*np.pi/180.\n\n # location of piercing point on geographical grid\n plat = dist*np.sin(-baz+np.pi/2.)/lat2km + slat\n plon = dist*np.cos(-baz+np.pi/2.)/lon2km + slon\n\n return plon, plat\n\n\ndef ttime(tr, dz, vp, vs, phase=None):\n \"\"\"\n Calculate travel time for interval dz and velocities vp and vs\n\n Parameters\n ----------\n tr : :class:`~obspy.core.Trace`\n Single trace object to migrate to depth\n dz : float\n Vertical sampling distance (km)\n vp : float\n P-wave velocity\n vs : float\n S-wave velocity\n phase : str\n Phase of interest\n \"\"\"\n\n # Get horizontal slowness\n slow = tr.stats.slow\n\n # Calculate travel time for phase\n if phase == 'Ps':\n tt = dz*(np.sqrt((1./vs)**2 - slow**2) -\n np.sqrt((1./vp)**2 - slow**2))\n elif phase == 'Pps':\n tt = dz*(np.sqrt((1./vs)**2 - slow**2) +\n np.sqrt((1./vp)**2 - slow**2))\n elif phase == 'Pss':\n tt = 2.*dz*(np.sqrt((1./vs)**2 - slow**2))\n else:\n print('Error - unrecognized phase, ', phase)\n print('Returning tt = 0')\n tt = 0.\n\n return tt\n\n\ndef timeshift(tr, tt):\n \"\"\"\n Shift a trace by a travel time tt and take amplitude at zero\n\n Parameters\n ----------\n tr : :class:`~obspy.core.Trace`\n Single trace object to migrate to depth\n tt : float\n Travel time (sec)\n\n \"\"\"\n\n # Define frequencies\n nt = int(tr.stats.npts)\n dt = tr.stats.delta\n freq = np.fft.fftfreq(int(nt), d=dt)\n\n # Hilbert transform and instantaneous phase\n hilb = hilbert(tr.data)\n hilb_index = np.rint(tt/dt)\n hilb_tt = hilb[int(hilb_index)]\n hilb_tt_phase = np.arctan2(hilb_tt.imag, hilb_tt.real)\n\n # Fourier transform\n ftr = np.fft.fft(tr.data)\n\n # Shift using Fourier transform\n for i in range(len(freq)):\n # Fourier timeshift theorem\n ftr[i] = ftr[i]*np.exp(2.*np.pi*1j*freq[i]*tt)\n\n # Back to time domain (inverse Fourier transform)\n rtr = np.fft.ifft(ftr)\n\n # Take first sample from trace (convert to real value)\n amp = np.real(rtr[0])\n\n return amp, hilb_tt_phase\n\n\ndef raypath(tr, nz=50, dep=None, vp=None, vs=None):\n \"\"\"\n Calculate travel times through velocity model for all phases of interest\n\n Parameters\n ----------\n tr : :class:`~obspy.core.Trace`\n Single trace object to migrate to depth\n nz : int\n Number of layers in interpolation\n dep : :class:`~numpy.ndarray`\n Depth array for velocity model\n vp : :class:`~numpy.ndarray`\n P-wave velocity array for velocity model\n vs : :class:`~numpy.ndarray`\n S-wave velocity array for velocity model\n\n \"\"\"\n\n # Define arrays with zeros\n plat = np.zeros(nz)\n plon = np.zeros(nz)\n ttps = np.zeros(nz)\n ttpps = np.zeros(nz)\n ttpss = np.zeros(nz)\n\n # Default velocity model - can be updated later\n if (dep is None) 
and (vp is None) and (vs is None):\n dep = np.array([0., 4., 8., 14., 25.9, 35.7, 45., 110., 200.])\n vp = np.array([4.0, 5.9, 6.2, 6.3, 6.8, 7.2, 8.0, 8.1, 8.2])\n vs = vp/1.73\n\n # Get regular depth array\n idep = np.linspace(dep.min(), dep.max(), nz)\n\n # Interpolate Vp and Vs models on depth grid\n ivp = sp.interpolate.interp1d(dep, vp, kind='linear')(idep)\n ivs = sp.interpolate.interp1d(dep, vs, kind='linear')(idep)\n\n # Get exact depth interval\n dz = idep[1] - idep[0]\n\n # Now loop through all depths\n for iz in range(nz):\n\n # Initialize travel time and distance counters\n dtps = 0.\n dtpps = 0.\n dtpss = 0.\n dx = 0.\n\n # Sum over depths from 0 to iz\n for i in range(iz):\n dtps += ttime(tr, dz, ivp[i], ivs[i], 'Ps')\n dtpps += ttime(tr, dz, ivp[i], ivs[i], 'Pps')\n dtpss += ttime(tr, dz, ivp[i], ivs[i], 'Pss')\n dx += ppoint_distance(tr, dz, ivs[i])\n\n # Get piercing point from distance\n plo, pla = ppoint(tr, dx)\n\n # Assign values to arrays\n ttps[iz] = dtps\n ttpps[iz] = dtpps\n ttpss[iz] = dtpss\n plon[iz] = plo\n plat[iz] = pla\n\n return ttps, ttpps, ttpss, plon, plat, idep\n\n\ndef haversine(lat, lon, xs_lat, xs_lon): # great-circle distance (kilometres)\n lat = np.radians(lat)\n lon = np.radians(lon)\n xs_lat = np.radians(xs_lat)\n xs_lon = np.radians(xs_lon)\n dlat = lat - xs_lat\n dlon = lon - xs_lon\n a = ((np.sin(dlat/2.))**2.) + \\\n (np.cos(xs_lat)*np.cos(lat)*((np.sin(dlon/2.))**2.))\n distance = np.abs(2.*6371.*np.arcsin(np.sqrt(a)), dtype=float)  # 6371 km Earth radius\n\n return np.abs(distance)\n", "sub_path": "rfpy/ccp.py", "file_name": "ccp.py", "file_ext": "py", "file_size_in_byte": 23333, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "rfpy.binning.bin_baz_slow", "line_number": 66, "usage_type": "call"}, {"api_name": "rfpy.binning", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.rint", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 187, 
"usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 260, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters.gaussian_filter", "line_number": 272, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters", "line_number": 272, "usage_type": "attribute"}, {"api_name": "scipy.ndimage", "line_number": 272, "usage_type": "name"}, {"api_name": "scipy.ndimage.filters.gaussian_filter", "line_number": 274, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters", "line_number": 274, "usage_type": "attribute"}, {"api_name": "scipy.ndimage", "line_number": 274, "usage_type": "name"}, {"api_name": "scipy.ndimage.filters.gaussian_filter", "line_number": 276, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters", "line_number": 276, "usage_type": "attribute"}, {"api_name": "scipy.ndimage", "line_number": 276, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 300, "usage_type": "call"}, {"api_name": "scipy.signal.hilbert", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 303, "usage_type": "call"}, {"api_name": "scipy.signal.hilbert", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 307, "usage_type": "call"}, {"api_name": "scipy.signal.hilbert", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "matplotlib.cm.RdBu_r", "line_number": 326, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 326, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 327, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 327, "usage_type": "name"}, {"api_name": "matplotlib.cm.RdBu_r", "line_number": 339, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 339, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 340, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 340, "usage_type": "name"}, {"api_name": "matplotlib.cm.RdBu_r", "line_number": 351, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 351, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 352, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 352, "usage_type": "name"}, {"api_name": "matplotlib.cm.RdBu_r", "line_number": 363, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 363, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 364, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 375, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 375, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 377, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 377, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 381, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 381, "usage_type": "name"}, {"api_name": "matplotlib.cm.RdBu_r", "line_number": 386, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 386, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 387, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 387, "usage_type": "name"}, {"api_name": "matplotlib.cm.RdBu_r", "line_number": 399, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 399, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 400, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 400, "usage_type": "name"}, {"api_name": "matplotlib.cm.RdBu_r", "line_number": 411, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 411, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 412, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 412, "usage_type": "name"}, {"api_name": "matplotlib.cm.RdBu_r", "line_number": 423, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 423, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 424, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 424, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 435, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 435, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 437, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 437, "usage_type": "name"}, {"api_name": "numpy.tan", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.arcsin", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 483, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 486, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 486, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 487, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 487, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 515, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 518, "usage_type": "call"}, {"api_name": 
"numpy.sqrt", "line_number": 519, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 521, "usage_type": "call"}, {"api_name": "numpy.fft.fftfreq", "line_number": 546, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 546, "usage_type": "attribute"}, {"api_name": "scipy.signal.hilbert", "line_number": 549, "usage_type": "call"}, {"api_name": "numpy.rint", "line_number": 550, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 552, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 555, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 555, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 560, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 560, "usage_type": "attribute"}, {"api_name": "numpy.fft.ifft", "line_number": 563, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 563, "usage_type": "attribute"}, {"api_name": "numpy.real", "line_number": 566, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 591, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 592, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 593, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 594, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 595, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 599, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 600, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 604, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 607, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 607, "usage_type": "attribute"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 608, "usage_type": "call"}, {"api_name": "scipy.interpolate", "line_number": 608, "usage_type": "attribute"}, {"api_name": "numpy.radians", "line_number": 643, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 644, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 645, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 646, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 649, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 650, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 650, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 651, "usage_type": "call"}, {"api_name": "numpy.arcsin", "line_number": 651, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 651, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 653, "usage_type": "call"}]} +{"seq_id": "168902595", "text": "import requests\nfrom bs4 import BeautifulSoup\nfrom random import choice\nfrom fake_useragent import UserAgent\nua = UserAgent()\n\n\n# params = {\"q\":\"\"}\n# keywords = [\n# \"small folding boxes\",\n# \"folding boxes uk\",\n# \"folding container\"\n# ]\n# URL = 'http://pythonprogramming.net/search/'\nURL = 'https://www.alibaba.com/trade/search/'\n\nproxies_arr = [\n 'http://123.207.30.131:80',\n 'http://223.241.79.40:18118',\n 'http://123.233.53.118:8118',\n 'http://114.229.189.140:808',\n 'http://123.233.53.118:8118',\n ]\nparams = {\n \"fsb\": 'y',\n \"IndexArea\": 'product_en',\n \"CatId\": '',\n \"SearchText\": '',\n}\n\nBase_URL = 'https://www.alibaba.com/products/'\ncompany_name = 'Shanghai Join Plastic Products Co., Ltd.'\nimport pymongo\n# myclient = 
pymongo.MongoClient(\"mongodb://localhost:27017/\")\nmyclient = pymongo.MongoClient(\"mongodb://47.74.64.136:27017/\")\nmydb = myclient[\"joinplastic\"]\nmycol = mydb[\"keywords\"]\nnesting_arr = list(mycol.find({\"category\": \"nesting boxes\"}))\n\n\n\nnesting_arr_len = len(nesting_arr)\n\nfor index, x in enumerate(nesting_arr):\n keyword = x[\"keyword\"]\n print(\"Searching keyword: {0} ({1} of {2})\".format(keyword, str(index+1), nesting_arr_len))\n\n page = 1\n # URL = 'https://www.alibaba.com/products/{0}/{1}.html'.format(\n # keyword.replace(' ', \"_\"), page)\n\n params[\"SearchText\"] = keyword\n proxies = {\n \"http\": choice(proxies_arr)\n }\n headers = {'user-agent': ua.random}\n r = requests.get(URL, headers=headers, params=params)\n status = r.status_code\n print(status)\n soup = BeautifulSoup(r.text, \"html.parser\")\n findMatch = False\n products_list = soup.select('div[data-content=\"abox-ProductNormalList\"] .m-product-item')\n if products_list:\n \n for p_index, product in enumerate(products_list):\n title = product.select(\n \".item-content .item-sub img.util-valign-inner\")[0][\"alt\"]\n company = product.select(\n \".item-content .item-extra a\")[0].get_text().strip()\n if company == company_name:\n mycol.update({'keyword': keyword}, {\n \"$set\": {'ali_rank': {company_name: p_index+1}}})\n findMatch = True\n break\n if p_index == len(products_list)-1 and findMatch == False:\n mycol.update({'keyword': keyword}, {\"$set\": {'ali_rank': {company_name: 0}}})\n print(title+company)\n else:\n products_list = soup.select('div[data-content=\"abox-ProductNormalList\"] .m-gallery-product-item-wrap')\n for p_index,product in enumerate(products_list):\n title = product.select(\n \".item-main .item-img-inner .offer-image-box img\")[0][\"alt\"]\n company = product.select(\n \".item-main .item-info .stitle a\")[0].get_text().strip()\n if company == company_name:\n mycol.update({'keyword': keyword}, {\"$set\": {'ali_rank': {company_name: p_index+1}}})\n findMatch = True\n break\n if p_index == len(products_list)-1 and findMatch == False:\n mycol.update({'keyword': keyword}, {\n \"$set\": {'ali_rank': {company_name: 0}}})\n print(title+company)\n\n\n", "sub_path": "Python/requests_alibabaDynamic.py", "file_name": "requests_alibabaDynamic.py", "file_ext": "py", "file_size_in_byte": 3286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "fake_useragent.UserAgent", "line_number": 5, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 35, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 54, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 57, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "173961340", "text": "import nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\n\n\nraw_news = open(\"hi.txt\", \"r\")\nnews = raw_news.read()\nstop_words = set(stopwords.words(\"english\"))\n\nwords = word_tokenize(news)\n\nfiltered_news = []\n\nfor i in words:\n if i not in stop_words:\n filtered_news.append(i)\n\nprint(filtered_news)\n\n# words = [\"cats\",\"geese\"]\n\nlemmatizer = WordNetLemmatizer()\nlemmatized_news = []\nfor l in filtered_news:\n lemmatized_news.append(lemmatizer.lemmatize(l))\n\nprint(lemmatized_news)\n\ntagged_news = []\n\ndef process_tag():\n try:\n for i in 
lemmatized_news:\n            words = word_tokenize(i)\n            tagged_news.append(nltk.pos_tag(words))\n\n    except Exception as e:\n        print(str(e))\n\n\nprocess_tag()\nprint(tagged_news)\n\n\n\n# def process_chunk():\n#     try:\n#         for j in lemmatized_news:\n#             words1 = nltk.word_tokenize(j)\n#             tagged_news1 = nltk.pos_tag(words1)\n#             chunkGram = r\"\"\"Chunk: {<RB.?>*<VB.?>*<NNP>+<NN>?}\"\"\"\n#             chunkParser = nltk.RegexpParser(chunkGram)\n#             chunked_news = chunkParser.parse(tagged_news1)\n#             chunked_news.draw()\n#\n#     except Exception as e:\n#         print(str(e))\n#\n# process_chunk()\n\n\n\nps = PorterStemmer()\nstemmed_news = []\n\nfor w in lemmatized_news:\n    stemmed_news.append(ps.stem(w))\n\nprint(stemmed_news)\n\n\n\n# def process_content():\n#     try:\n#         for i in lemmatized_news:\n#             words = nltk.word_tokenize(i)\n#             tagged = nltk.pos_tag(words)\n#\n#             namedEnt = nltk.ne_chunk(tagged)\n#\n#             namedEnt.draw()\n#\n#     except Exception as e:\n#         print(str(e))\n#\n#\n# process_content()\n\n\n\n\n\n", "sub_path": "back-end/senti.py", "file_name": "senti.py", "file_ext": "py", "file_size_in_byte": 1781, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "nltk.corpus.stopwords.words", "line_number": 10, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 10, "usage_type": "name"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 12, "usage_type": "call"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 24, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 36, "usage_type": "call"}, {"api_name": "nltk.pos_tag", "line_number": 37, "usage_type": "call"}, {"api_name": "nltk.stem.PorterStemmer", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "521200832", "text": "from bottle import route, run, response, request, install, template\nfrom bottle_sqlite import SQLitePlugin\n\nfrom smeterd import storage\nfrom smeterd import utils\n\n\n\ndef catch_exceptions(fn):\n    def wrapper(*args, **kwargs):\n        try:\n            return fn(*args, **kwargs)\n        except Exception as e:\n            response.status = 400\n            return '%s: %s\\n' % (type(e).__name__, str(e))\n    return wrapper\n\n\ndef respond_in_plaintext(fn):\n    def wrapper(*args, **kwargs):\n        response.content_type = 'text/plain; charset=\"UTF-8\"'\n        return fn(*args, **kwargs)\n    return wrapper\n\n\n\n\n\nTABLE_TPL = '''\n<table>\n<tr>\n%for header in result[0].keys():\n    <th>{{header}}</th>\n%end\n</tr>\n%for row in result:\n    <tr>\n    %for col in row:\n        <td>{{col}}</td>\n    %end\n    </tr>\n%end\n</table>
'''\n\n\n@route('/', method='GET', apply=[catch_exceptions])\ndef index(db):\n data = storage.generate_report(db)\n if len(data) == 0:\n return ''\n return template(TABLE_TPL, result=data)\n\n@route('/current', method='GET', apply=[respond_in_plaintext, catch_exceptions])\ndef current():\n from smeterd.meter import read_one_packet\n return read_one_packet()\n\n\n\ndef start_webserver(host, port, db, auto_reload=False):\n db = utils.get_absolute_path(db)\n install(SQLitePlugin(dbfile=db))\n run(host=host, port=port, reloader=auto_reload)\n\n", "sub_path": "smeterd/webserver.py", "file_name": "webserver.py", "file_ext": "py", "file_size_in_byte": 1450, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "bottle.response.status", "line_number": 14, "usage_type": "attribute"}, {"api_name": "bottle.response", "line_number": 14, "usage_type": "name"}, {"api_name": "bottle.response.content_type", "line_number": 21, "usage_type": "attribute"}, {"api_name": "bottle.response", "line_number": 21, "usage_type": "name"}, {"api_name": "smeterd.storage.generate_report", "line_number": 48, "usage_type": "call"}, {"api_name": "smeterd.storage", "line_number": 48, "usage_type": "name"}, {"api_name": "bottle.template", "line_number": 51, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 46, "usage_type": "call"}, {"api_name": "smeterd.meter.read_one_packet", "line_number": 56, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 53, "usage_type": "call"}, {"api_name": "smeterd.utils.get_absolute_path", "line_number": 61, "usage_type": "call"}, {"api_name": "smeterd.utils", "line_number": 61, "usage_type": "name"}, {"api_name": "bottle.install", "line_number": 62, "usage_type": "call"}, {"api_name": "bottle_sqlite.SQLitePlugin", "line_number": 62, "usage_type": "call"}, {"api_name": "bottle.run", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "630584621", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 13 15:56:07 2020\r\nRevised on Nov. 19\r\nThis file plots the particle deposition depending on the size of the particles in a loop.\r\nParameters:\r\n Details can be found in the INPUT PARAMETERS section\r\n\r\nNotes:\r\n - the particle diameter is specified in mm in the txt file. Here we use microns\r\n - Figure saved as SVG\r\n - help on plotting found here https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.bar.html\r\n - rev 04:\r\n - title and figure labels are defined in the parameters section\r\n - axisOfInterest added (more flexible). 
Values: x is sagittal - y is rostrocaudal - z is height\r\n\r\n@author: jmbouteiller\r\n\"\"\"\r\n\r\n#from numpy import genfromtxt\r\nimport pandas\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom Coverage_Functions import total_area_Data_Refined\r\nfrom Coverage_Functions import filter_Spray_RefinedV3\r\nimport os\r\nfrom Coverage_Functions import total_area_filterData_Refined\r\nfrom datetime import date\r\n#===================== INPUT PARAMETERS ==========================\r\n# inputFile = 'ang_dis_vel_1200000_part_viral_wide.txt'\r\ninputFile = '3000_68.txt'\r\nsprayFile ='0.68_1.8MSpray_Sticky.txt'\r\ninputmesh = 'halfGeometry2.obj'\r\nshow3D = False\r\nexport3Dmesh = False\r\n# Column names for the txt file\r\nSprayIndex = np.array(['x', 'y', 'z','size', 'cone_angle','spray_vel'])\r\n# SprayIndex = np.array(['x', 'y', 'z','size'])\r\n# myIndex = np.array(['x', 'y', 'z','size','inhale_speed'])\r\nmyIndex = np.array(['x', 'y', 'z','size','dose'])\r\nswabpercent = 1 #Coverage Percentage of using Swab 1 being 100%\r\nswabpos = 1/4 # Swab coverage location in y axis, fraction of anterior coverage\r\nNumsitcom = 4 #Number of situation to compare\r\nthreviral = 0.3 #threshold for viral particle coverage area infection\r\nthrespray = 0.3 #threshold for spray particle coverage area protection\r\nNumofsize = 5 #Define how many columns of different sizes you want to show\r\n# #===================== END INPUT PARAMETERS ==========================\r\n\r\nraw_data = pandas.read_csv(inputFile, delim_whitespace=True, comment='%', header=0)\r\nttv = len(raw_data.columns) #number of columns in viral data\r\nraw_data = raw_data.dropna()\r\nraw_data = raw_data.to_numpy()\r\nspray_data = pandas.read_csv(sprayFile, delim_whitespace=True, comment='%', header=0)\r\nttc = len(spray_data.columns) # number of columns in spray data\r\nspray_data = spray_data.dropna()\r\nspray_data = spray_data.to_numpy()\r\nNpara = len(myIndex)\r\nNpara2 = len(SprayIndex)\r\nNumsit = int((ttc-1)/Npara2)\r\n# Numsit = 1\r\nNumsitV = int((ttv-1)/Npara)\r\n\r\n\r\n##Different Viral situations\r\nfor j in range(NumsitV):\r\n #Initialize protection rate for spray, swab+spray\r\n Maxspray = 100\r\n Maxss = 100\r\n MedSS = 0\r\n MaxD = pandas.DataFrame(data=raw_data[:, j * Npara + 1:(j + 1) * Npara + 1], columns=myIndex)\r\n sortdata = MaxD.sort_values(['size'])\r\n #Filter out viral particle at outlet\r\n viralout = sortdata[sortdata['z'] < (min(sortdata['z']) + max(sortdata['size']))]\r\n sortdata = sortdata[sortdata['z'] >= (min(sortdata['z']) + max(sortdata['size']))]\r\n coveragelen = min(sortdata['y']) + (max(sortdata['y']) - min(sortdata['y'])) * swabpos\r\n coveragelenx = -30\r\n NumofSit = int((np.shape(raw_data)[1] - 1) / Npara)\r\n sortdata['size'] = (sortdata['size'] * 100000 + .1).astype(int) # +0.1 to make sure the it will not be rounded to 0\r\n size = np.array(sortdata['size'].unique())\r\n rangeS = int(len(size)/Numofsize)\r\n sizelist = np.array([size[0], size[rangeS], size[rangeS*2], size[rangeS*3], size[rangeS*4]])\r\n\r\n ##No Mask Analysis **\r\n depos = np.zeros(Numofsize)\r\n for i in range(0, Numofsize):\r\n tepdata = sortdata.loc[sortdata['size'] <= size[(i+1)*rangeS-1]]\r\n depos[i] = total_area_Data_Refined(inputmesh, tepdata, myIndex, False, False, threviral)\r\n\r\n ##Swab Analysis **\r\n deposswab = np.zeros(Numofsize)\r\n swabsort = sortdata.loc[(sortdata['y'] > coveragelen) | (sortdata['z'] > coveragelenx) | (sortdata['z'] < -50)] ## -30 and -50 are from COMSOL z axis\r\n for i in 
range(0,Numofsize):\r\n tepdata = swabsort.loc[swabsort['size'] <= size[(i+1)*rangeS-1]]\r\n deposswab[i] = total_area_Data_Refined(inputmesh, tepdata,myIndex, False, False,threviral)\r\n\r\n ##Mask Analysis **\r\n deposnew = np.zeros(Numofsize)\r\n partnew = sortdata.loc[sortdata['size'] == size[0]]\r\n # deposnew[0] = total_area_Data_Refined(inputmesh, partnew, myIndex, False, False, threviral)\r\n for i in range(1, len(size)):\r\n if size[i] < 40:\r\n tepdata = sortdata.loc[sortdata['size'] == size[i]]\r\n partnew = np.vstack((partnew, tepdata))\r\n elif size[i] < 110:\r\n tepdata = sortdata.loc[sortdata['size'] == size[i]]\r\n dropsize = int((0.25) * tepdata.shape[0]) ##Mask #1\r\n # dropsize = int(((size[i] - 1) /20 * 0.24 + 0.76) * tepdata.shape[0]) ##Mask #2\r\n # dropsize = int(((size[i] - 1)/20 * 0.70) * tepdata.shape[0]) ##Mask #3\r\n # dropsize = int(((size[i] - 1) /20 - 1) * -0.08 + 0.44) * tepdata.shape[0]) ##Mask #4\r\n drop_indices = np.random.choice(tepdata.index, dropsize, replace=False)\r\n tepdata = tepdata.drop(drop_indices)\r\n partnew = np.vstack((partnew, tepdata))\r\n else:\r\n tepdata = sortdata.loc[sortdata['size'] == size[i]]\r\n dropsize = int(((size[i] - 100) / 2000 * 0.50 + 0.25) * tepdata.shape[0]) ##Mask #1\r\n # dropsize = int(((size[i] - 1) /20 * 0.24 + 0.76) * tepdata.shape[0]) ##Mask #2\r\n # dropsize = int(((size[i] - 1)/20 * 0.70) * tepdata.shape[0]) ##Mask #3\r\n # dropsize = int(((size[i] - 1) /20 - 1) * -0.08 + 0.44) * tepdata.shape[0]) ##Mask #4\r\n drop_indices = np.random.choice(tepdata.index, dropsize, replace=False)\r\n tepdata = tepdata.drop(drop_indices)\r\n partnew = np.vstack((partnew, tepdata))\r\n partnew = pandas.DataFrame(data=partnew, columns=myIndex)\r\n for i in range(0,Numofsize):\r\n tepdata = partnew.loc[partnew['size'] <= size[(i + 1) * rangeS - 1]]\r\n deposnew[i] = total_area_Data_Refined(inputmesh, tepdata, myIndex, False, False, threviral)\r\n\r\n #Analyze Mask, Swab, No protection with one viral situation\r\n Max = total_area_Data_Refined(inputmesh, sortdata, myIndex, show3D, export3Dmesh,threviral) # Total depos No Mask as control\r\n Maxswab = total_area_Data_Refined(inputmesh, swabsort, myIndex, show3D, export3Dmesh, threviral) # Swab\r\n MaxM = total_area_Data_Refined(inputmesh, partnew, myIndex, show3D, export3Dmesh, threviral) # Mask\r\n\r\n ## Different Spray situations\r\n sprayrange = np.zeros((0,3))\r\n swabrange = np.zeros((0,3))\r\n swabrange = swabrange + 100\r\n for k in range(Numsit):\r\n filename = '' + str(date.today().isoformat()) + sprayFile\r\n Spray = pandas.DataFrame(data = spray_data[:,k*Npara2+1:(k+1)*Npara2+1], columns = SprayIndex)\r\n # Filter out all the spray and viral particles at outlet,\r\n # +1 to ensure particle with all diameter will be covered\r\n sprayout = Spray[Spray['z'] < (min(Spray['z']) + max(Spray['size']))]\r\n Spray = Spray[Spray['z'] >= (min(Spray['z']) + max(Spray['size']))]\r\n SpID = filter_Spray_RefinedV3(Spray,sortdata,myIndex)\r\n\r\n #Spray Analysis\r\n tepspray = total_area_Data_Refined(inputmesh, SpID,myIndex, show3D, export3Dmesh,threviral) #Spray\r\n sprayrange = np.vstack([sprayrange,[tepspray, Spray.iloc[0]['cone_angle'], Spray.iloc[0]['spray_vel']]])\r\n if tepspray < Maxspray:\r\n Maxspray = tepspray\r\n Angle = Spray.iloc[0]['cone_angle']\r\n Velocity = Spray.iloc[0]['spray_vel']\r\n deposspray = np.zeros(Numofsize)\r\n for i in range(0,Numofsize):\r\n tepdata = SpID.loc[SpID['size'] <= size[(i+1)*rangeS-1]]\r\n deposspray[i] = total_area_Data_Refined(inputmesh, 
tepdata,myIndex, False, False,threviral)\r\n #Swab + Spray Analysis\r\n swabspray = np.zeros(Numofsize)\r\n sprayswab = SpID.loc[(SpID['y'] > coveragelen) | (SpID['z'] > coveragelenx)]\r\n for i in range(0,Numofsize):\r\n tepdata = sprayswab.loc[sprayswab['size'] <= size[(i+1)*rangeS-1]]\r\n swabspray[i] = total_area_Data_Refined(inputmesh, tepdata,myIndex, False, False,threviral)\r\n tepSS = total_area_Data_Refined(inputmesh, sprayswab, myIndex, show3D, export3Dmesh,threviral) #Swab+Spray\r\n swabrange = np.vstack([swabrange,[tepSS,Spray.iloc[0]['cone_angle'], Spray.iloc[0]['spray_vel']]])\r\n if tepSS < Maxss:\r\n Maxss = tepSS\r\n AngleSS = Spray.iloc[0]['cone_angle']\r\n VelocitySS = Spray.iloc[0]['spray_vel']\r\n MedSS = Maxss\r\n spraystd = np.std(sprayrange[:,0])\r\n spraymean = np.mean(sprayrange[:,0])\r\n spraymin = np.min(sprayrange[:,0])\r\n swabmin = np.min(swabrange[:,0])\r\n\r\n\r\n barrange = np.array([spraymin,tepspray])\r\n swabbar = np.array([swabmin,tepSS])\r\n print('Best Cone angle is ' + str(Angle) + '\\n' +'Best spray Velocity is '+ str(Velocity) + '\\n' +\r\n 'Best Cone angleSS is ' + str(AngleSS) + '\\n' +'Best spray VelocitySS is '+ str(VelocitySS))\r\n\r\nnp.savetxt(filename + 'spray.txt', sprayrange,delimiter = ' ')\r\nnp.savetxt(filename + 'swab.txt', swabrange, delimiter= ' ')\r\n\r\nax = plt.subplot(2,1,1)\r\nax.bar(sizelist/100-0.2, (depos - deposnew)/depos*100, width=0.2, color = 'k', ec = 'k', align='center')\r\nax.bar(sizelist/100, (depos - deposswab)/depos*100, width=0.2, color = 'darkgrey', ec = 'k', align='center')\r\nax.bar(sizelist/100+0.2, (depos - deposspray)/depos*100, width=0.2, color = 'w', ec = 'k', align='center')\r\nlegen = {'Mask':'k','Swab':'darkgrey', 'Spray':'w'}\r\nlabels = list(legen.keys())\r\nhandles = [plt.Rectangle((0,0),1,1, color=legen[label]) for label in labels]\r\nax.legend(handles,labels,prop = {\"size\":15})\r\nax.set_title('Protection rate with different situation of different size of particles',fontsize = 15)\r\nplt.xlabel('Particle diameter(um)')\r\nplt.ylabel('Protection Rate(%)')\r\nxvalue = np.arange(Numsitcom)\r\n# yvalue = [(Max - MaxM)/Max*100,(Max - Maxswab)/Max*100,(Max - Minswab)/Max*100, (Max - Maxspray)/Max*100,\r\n# (Max - MedSS)/Max*100, (Max - MedSM)/Max*100, (Max - Min)/Max*100]\r\nyvalue = [(Max - MaxM)/Max*100,(Max - Maxswab)/Max*100, (Max - spraymin)/Max*100,\r\n (Max - swabmin)/Max*100]\r\nax1 = plt.subplot(2,1,2)\r\nax1.bar(xvalue, yvalue,color = 'lightgrey', ec = 'k', align = 'center')\r\nfor index, value in enumerate(yvalue):\r\n ax1.text(index-0.1, value+1, str(int(value))+'%')\r\n# xlabels = ['Mask', 'Swab', 'Swab+Mask', 'Spray', 'Spray+Swab', 'Spray+Mask', 'Spray+Swab+Mask']\r\nxlabels = ['Mask', 'Swab', 'Spray', 'Spray+Swab']\r\nplt.xticks(xvalue,xlabels)\r\nax1.set_title('Protection rate between different situations',fontsize = 15)\r\nplt.xlabel('Protection Method')\r\nplt.ylabel('Protection Rate(%)')\r\nnameOfFig = str(int(spray_data[0,(k+1)*Npara-2]))+str(int(spray_data[0,(k+1)*Npara-1])) + filename\r\nfigure = plt.gcf()\r\nfigure.set_size_inches(11, 8)\r\nplt.savefig(os.path.join(str(nameOfFig) + '.pdf'), dpi=300)\r\nplt.savefig(os.path.join(str(nameOfFig) + '.svg'), format=\"svg\")\r\nplt.show()\r\n# fig.savefig( fileName+'.svg', dpi=150)", "sub_path": "NewCoverageDepV9_area.py", "file_name": "NewCoverageDepV9_area.py", "file_ext": "py", "file_size_in_byte": 11321, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": 
"numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 84, "usage_type": "call"}, {"api_name": "Coverage_Functions.total_area_Data_Refined", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 90, "usage_type": "call"}, {"api_name": "Coverage_Functions.total_area_Data_Refined", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 121, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 122, "usage_type": "call"}, {"api_name": "Coverage_Functions.total_area_Data_Refined", "line_number": 125, "usage_type": "call"}, {"api_name": "Coverage_Functions.total_area_Data_Refined", "line_number": 128, "usage_type": "call"}, {"api_name": "Coverage_Functions.total_area_Data_Refined", "line_number": 129, "usage_type": "call"}, {"api_name": "Coverage_Functions.total_area_Data_Refined", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 137, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 138, "usage_type": "call"}, {"api_name": "Coverage_Functions.filter_Spray_RefinedV3", "line_number": 143, "usage_type": "call"}, {"api_name": "Coverage_Functions.total_area_Data_Refined", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 152, "usage_type": "call"}, {"api_name": "Coverage_Functions.total_area_Data_Refined", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 157, "usage_type": "call"}, {"api_name": "Coverage_Functions.total_area_Data_Refined", "line_number": 161, "usage_type": "call"}, {"api_name": "Coverage_Functions.total_area_Data_Refined", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 176, 
"usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Rectangle", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path", "line_number": 212, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 213, "usage_type": "call"}, {"api_name": "os.path", "line_number": 213, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}]} +{"seq_id": "442906997", "text": "from PIL import Image\nfrom numpy import *\nfrom scipy.ndimage import filters\n\npath = '/Users/danielvillarreal/Dropbox/School/College/Fall 2016/Computer Vision/images/'\nname = 'building'\nim = array(Image.open(path + name + '.jpg').convert('L'))\nfor sigma in range (0,6):\n\tim2 = filters.gaussian_filter(im,sigma)\n\tim2 = Image.fromarray(im2)\n\tim2.convert('RGB').save(name + 'Gaussian_StdDev_' + str(sigma) + '.png','png')\n", "sub_path": "Labs/Lab1/gaussianBlurGrayFull.py", "file_name": "gaussianBlurGrayFull.py", "file_ext": "py", "file_size_in_byte": 419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "PIL.Image.open", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 7, "usage_type": "name"}, {"api_name": "scipy.ndimage.filters.gaussian_filter", "line_number": 9, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters", "line_number": 9, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 10, 
"usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "102170619", "text": "# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\n__all__ = (\n \"add_object_align_init\",\n \"object_data_add\",\n \"AddObjectHelper\",\n \"object_add_grid_scale\",\n \"object_add_grid_scale_apply_operator\",\n \"object_image_guess\",\n \"world_to_camera_view\",\n)\n\n\nimport bpy\n\nfrom bpy.props import (\n BoolProperty,\n BoolVectorProperty,\n FloatVectorProperty,\n)\n\n\ndef add_object_align_init(context, operator):\n \"\"\"\n Return a matrix using the operator settings and view context.\n\n :arg context: The context to use.\n :type context: :class:`bpy.types.Context`\n :arg operator: The operator, checked for location and rotation properties.\n :type operator: :class:`bpy.types.Operator`\n :return: the matrix from the context and settings.\n :rtype: :class:`mathutils.Matrix`\n \"\"\"\n\n from mathutils import Matrix, Vector, Euler\n properties = operator.properties if operator is not None else None\n\n space_data = context.space_data\n if space_data and space_data.type != 'VIEW_3D':\n space_data = None\n\n # location\n if operator and properties.is_property_set(\"location\"):\n location = Matrix.Translation(Vector(properties.location))\n else:\n location = Matrix.Translation(context.scene.cursor_location)\n\n if operator:\n properties.location = location.to_translation()\n\n # rotation\n view_align = (context.preferences.edit.object_align == 'VIEW')\n view_align_force = False\n if operator:\n if properties.is_property_set(\"view_align\"):\n view_align = view_align_force = operator.view_align\n else:\n if properties.is_property_set(\"rotation\"):\n # ugh, 'view_align' callback resets\n value = properties.rotation[:]\n properties.view_align = view_align\n properties.rotation = value\n del value\n else:\n properties.view_align = view_align\n\n if operator and (properties.is_property_set(\"rotation\") and\n not view_align_force):\n\n rotation = Euler(properties.rotation).to_matrix().to_4x4()\n else:\n if view_align and space_data:\n rotation = space_data.region_3d.view_matrix.to_3x3().inverted()\n rotation.resize_4x4()\n else:\n rotation = Matrix()\n\n # set the operator properties\n if operator:\n properties.rotation = rotation.to_euler()\n\n return location @ rotation\n\n\ndef object_data_add(context, obdata, operator=None, name=None):\n \"\"\"\n Add an object using the view context and preference to initialize the\n location, rotation and layer.\n\n :arg context: The context to use.\n :type context: :class:`bpy.types.Context`\n :arg obdata: the data used for the new object.\n :type obdata: valid object data type or None.\n :arg operator: The operator, checked for location and rotation properties.\n :type operator: 
:class:`bpy.types.Operator`\n :arg name: Optional name\n :type name: string\n :return: the newly created object in the scene.\n :rtype: :class:`bpy.types.Object`\n \"\"\"\n scene = context.scene\n layer = context.view_layer\n layer_collection = context.layer_collection or layer.active_layer_collection\n scene_collection = layer_collection.collection\n\n for ob in layer.objects:\n ob.select_set(False)\n\n if name is None:\n name = \"Object\" if obdata is None else obdata.name\n\n obj_act = layer.objects.active\n obj_new = bpy.data.objects.new(name, obdata)\n scene_collection.objects.link(obj_new)\n obj_new.select_set(True)\n obj_new.matrix_world = add_object_align_init(context, operator)\n\n if obj_act and obj_act.mode == 'EDIT' and obj_act.type == obj_new.type:\n bpy.ops.mesh.select_all(action='DESELECT')\n obj_act.select_set(True)\n bpy.ops.object.mode_set(mode='OBJECT')\n\n obj_act.select_set(True)\n scene.update() # apply location\n # layer.objects.active = obj_new\n\n # Match up UV layers, this is needed so adding an object with UV's\n # doesn't create new layers when there happens to be a naming mis-match.\n uv_new = obdata.uv_layers.active\n if uv_new is not None:\n uv_act = obj_act.data.uv_layers.active\n if uv_act is not None:\n uv_new.name = uv_act.name\n\n bpy.ops.object.join() # join into the active.\n if obdata:\n bpy.data.meshes.remove(obdata)\n\n bpy.ops.object.mode_set(mode='EDIT')\n else:\n layer.objects.active = obj_new\n if context.preferences.edit.use_enter_edit_mode:\n bpy.ops.object.mode_set(mode='EDIT')\n\n return obj_new\n\n\nclass AddObjectHelper:\n def view_align_update_callback(self, context):\n if not self.view_align:\n self.rotation.zero()\n\n view_align: BoolProperty(\n name=\"Align to View\",\n default=False,\n update=view_align_update_callback,\n )\n location: FloatVectorProperty(\n name=\"Location\",\n subtype='TRANSLATION',\n )\n rotation: FloatVectorProperty(\n name=\"Rotation\",\n subtype='EULER',\n )\n\n @classmethod\n def poll(self, context):\n return context.scene.library is None\n\n\ndef object_add_grid_scale(context):\n \"\"\"\n Return scale which should be applied on object\n data to align it to grid scale\n \"\"\"\n\n space_data = context.space_data\n\n if space_data and space_data.type == 'VIEW_3D':\n return space_data.overlay.grid_scale_unit\n\n return 1.0\n\n\ndef object_add_grid_scale_apply_operator(operator, context):\n \"\"\"\n Scale an operators distance values by the grid size.\n \"\"\"\n grid_scale = object_add_grid_scale(context)\n\n properties = operator.properties\n properties_def = properties.bl_rna.properties\n for prop_id in properties_def.keys():\n if not properties.is_property_set(prop_id):\n prop_def = properties_def[prop_id]\n if prop_def.unit == 'LENGTH' and prop_def.subtype == 'DISTANCE':\n setattr(operator, prop_id,\n getattr(operator, prop_id) * grid_scale)\n\n\ndef object_image_guess(obj, bm=None):\n \"\"\"\n Return a single image used by the object,\n first checking the texture-faces, then the material.\n \"\"\"\n # TODO, cycles/nodes materials\n me = obj.data\n if bm is None:\n if obj.mode == 'EDIT':\n import bmesh\n bm = bmesh.from_edit_mesh(me)\n\n if bm is not None:\n tex_layer = bm.faces.layers.tex.active\n if tex_layer is not None:\n for f in bm.faces:\n image = f[tex_layer].image\n if image is not None:\n return image\n else:\n tex_layer = me.uv_textures.active\n if tex_layer is not None:\n for tf in tex_layer.data:\n image = tf.image\n if image is not None:\n return image\n\n for m in obj.data.materials:\n if m is 
not None:\n # backwards so topmost are highest priority\n for mtex in reversed(m.texture_slots):\n if mtex and mtex.use_map_color_diffuse:\n texture = mtex.texture\n if texture and texture.type == 'IMAGE':\n image = texture.image\n if image is not None:\n return image\n return None\n\n\ndef world_to_camera_view(scene, obj, coord):\n \"\"\"\n Returns the camera space coords for a 3d point.\n (also known as: normalized device coordinates - NDC).\n\n Where (0, 0) is the bottom left and (1, 1)\n is the top right of the camera frame.\n values outside 0-1 are also supported.\n A negative 'z' value means the point is behind the camera.\n\n Takes shift-x/y, lens angle and sensor size into account\n as well as perspective/ortho projections.\n\n :arg scene: Scene to use for frame size.\n :type scene: :class:`bpy.types.Scene`\n :arg obj: Camera object.\n :type obj: :class:`bpy.types.Object`\n :arg coord: World space location.\n :type coord: :class:`mathutils.Vector`\n :return: a vector where X and Y map to the view plane and\n Z is the depth on the view axis.\n :rtype: :class:`mathutils.Vector`\n \"\"\"\n from mathutils import Vector\n\n co_local = obj.matrix_world.normalized().inverted() @ coord\n z = -co_local.z\n\n camera = obj.data\n frame = [-v for v in camera.view_frame(scene=scene)[:3]]\n if camera.type != 'ORTHO':\n if z == 0.0:\n return Vector((0.5, 0.5, 0.0))\n else:\n frame = [(v / (v.z / z)) for v in frame]\n\n min_x, max_x = frame[1].x, frame[2].x\n min_y, max_y = frame[0].y, frame[1].y\n\n x = (co_local.x - min_x) / (max_x - min_x)\n y = (co_local.y - min_y) / (max_y - min_y)\n\n return Vector((x, y, z))\n", "sub_path": "engine/2.80/scripts/modules/bpy_extras/object_utils.py", "file_name": "object_utils.py", "file_ext": "py", "file_size_in_byte": 9625, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "mathutils.Matrix.Translation", "line_number": 62, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 62, "usage_type": "name"}, {"api_name": "mathutils.Vector", "line_number": 62, "usage_type": "call"}, {"api_name": "mathutils.Matrix.Translation", "line_number": 64, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 64, "usage_type": "name"}, {"api_name": "mathutils.Euler", "line_number": 88, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 94, "usage_type": "call"}, {"api_name": "bpy.data.objects.new", "line_number": 131, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 131, "usage_type": "attribute"}, {"api_name": "bpy.ops.mesh.select_all", "line_number": 137, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 137, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.mode_set", "line_number": 139, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 139, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.join", "line_number": 153, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 153, "usage_type": "attribute"}, {"api_name": "bpy.data.meshes.remove", "line_number": 155, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 155, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.mode_set", "line_number": 157, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 157, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.mode_set", "line_number": 161, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 161, "usage_type": "attribute"}, {"api_name": 
"bpy.props.BoolProperty", "line_number": 171, "usage_type": "call"}, {"api_name": "bpy.props.FloatVectorProperty", "line_number": 176, "usage_type": "call"}, {"api_name": "bpy.props.FloatVectorProperty", "line_number": 180, "usage_type": "call"}, {"api_name": "bmesh.from_edit_mesh", "line_number": 230, "usage_type": "call"}, {"api_name": "mathutils.Vector", "line_number": 292, "usage_type": "call"}, {"api_name": "mathutils.Vector", "line_number": 302, "usage_type": "call"}]} +{"seq_id": "431772978", "text": "#!./P2ENV/bin/python\n# Random Walk visually\n\nimport matplotlib.pyplot as plt\nfrom random_walk import RandomWalk\n\nrw = RandomWalk(5000)\nrw.fill_walk()\n\n# Colormap for the walk light color(start) to dark color(end) of the walk\n# point_numbers stores the steps of the walk in order and used in c=\npoint_numbers = list(range(rw.num_points))\n\n# Set the size of the plotting window.\nplt.figure(figsize=(20, 9))\n\n# Emphasize the first and last points.\n# plt.plot(0, 0, c='green', edgecolors='none', s=100)\n# plt.plot(rw.x_values[-1], rw.y_values[-1], c='red', edgecolors='none',\n# s=100)\n\n# plt.plot(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Blues,\n# edgecolor='none', s=15)\n\nplt.plot(rw.x_values, rw.y_values, linewidth=1, c='yellow')\n\n# Remove the axes.\nplt.axis('off')\n\nplt.show()\n", "sub_path": "Part_II/rw_visual.1.py", "file_name": "rw_visual.1.py", "file_ext": "py", "file_size_in_byte": 814, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "random_walk.RandomWalk", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "603426372", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#'''\n#This node listens to a service call and a topic for text to speech\n#requests. 
These will be processed by the festival or the philips tts module.\n#'''\n\nimport rospy\nimport tf\n\n\nclass Ghost(object):\n def __init__(self):\n # topics\n self.prefix = \"hero\"\n self.broadcaster = tf.TransformBroadcaster()\n\n def create_ghosts(self):\n self.add_prefix(\"base_link\")\n\n self.remap_frame(\"head_tilt_link\", \"/\" + self.prefix + \"/neck_tilt\")\n self.remap_frame(\"head_rgbd_sensor_gazebo_frame\", \"/\" + self.prefix + \"/top_kinect/openni_camera\")\n self.remap_frame(\"base_range_sensor_link\", \"/\" + self.prefix + \"/base_laser\")\n \n self.broadcaster.sendTransform((0, 0, 0), tf.transformations.quaternion_from_euler(3.14159265, -1.570796325, 0),\n rospy.Time.now(), \"/\" + self.prefix + \"/grippoint_left\", \"hand_palm_link\")\n self.broadcaster.sendTransform((0, 0, 0), tf.transformations.quaternion_from_euler(3.14159265, -1.570796325, 0),\n rospy.Time.now(), \"/\" + self.prefix + \"/grippoint_right\", \"hand_palm_link\")\n self.broadcaster.sendTransform((0, 0, 0), tf.transformations.quaternion_from_euler(3.14159265, -1.570796325, 0),\n rospy.Time.now(), \"/\" + self.prefix + \"/head_mount\", \"torso_lift_link\")\n\n def add_prefix(self, frame):\n ghost_frame = \"/\" + self.prefix + \"/\" + frame\n self.remap_frame(frame, ghost_frame)\n\n def remap_frame(self, frame, ghost_frame):\n self.broadcaster.sendTransform((0, 0, 0), tf.transformations.quaternion_from_euler(0, 0, 0),\n rospy.Time.now(), ghost_frame, frame)\n\n\nif __name__ == \"__main__\":\n rospy.init_node('tf_ghost_publisher')\n rate = rospy.Rate(50)\n try:\n ghost = Ghost()\n while not rospy.is_shutdown():\n ghost.create_ghosts()\n rate.sleep()\n except rospy.ROSInterruptException:\n pass\n", "sub_path": "src/tf_ghost/tf_ghost.py", "file_name": "tf_ghost.py", "file_ext": "py", "file_size_in_byte": 2063, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "tf.TransformBroadcaster", "line_number": 17, "usage_type": "call"}, {"api_name": "tf.transformations.quaternion_from_euler", "line_number": 26, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 26, "usage_type": "attribute"}, {"api_name": "rospy.Time.now", "line_number": 27, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tf.transformations.quaternion_from_euler", "line_number": 28, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 28, "usage_type": "attribute"}, {"api_name": "rospy.Time.now", "line_number": 29, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tf.transformations.quaternion_from_euler", "line_number": 30, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 30, "usage_type": "attribute"}, {"api_name": "rospy.Time.now", "line_number": 31, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tf.transformations.quaternion_from_euler", "line_number": 38, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rospy.Time.now", "line_number": 39, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 39, "usage_type": "attribute"}, {"api_name": "rospy.init_node", "line_number": 43, "usage_type": "call"}, {"api_name": "rospy.Rate", "line_number": 44, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 47, "usage_type": "call"}, 
{"api_name": "rospy.ROSInterruptException", "line_number": 50, "usage_type": "attribute"}]} +{"seq_id": "109677188", "text": "from Scripts import inputManager\nfrom Scripts import character\nfrom colorama import Fore, Back, Style, init\n\n\nclass Player(character.Character):\n\n def __init__(self, name, health):\n super().__init__(name, health)\n\n self.dungeon = ''\n self.currentRoom = ''\n self.inputManager = ''\n\n self.inventory = {}\n\n # Init colorama\n init()\n\n def Setup(self, currentDungeon):\n self.dungeon = currentDungeon\n\n # The room the player is currently in. Set to the start room when the player starts the game.\n self.currentRoom = self.dungeon.startRoom\n\n # InputManager to handle typing\n self.inputManager = inputManager.InputManager(self, self.dungeon)\n\n def CheckInventory(self):\n\n print(\"\\n------------------------ \\n\"\n + Fore.BLUE + \"Inventory: \" + Fore.RESET + str(len(self.inventory)) + \" items \\n\")\n\n for item in self.inventory:\n print(\" - \" + item)\n\n print(\"\\n------------------------\" + Back.RESET)\n", "sub_path": "Scripts/player.py", "file_name": "player.py", "file_ext": "py", "file_size_in_byte": 1026, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "Scripts.character.Character", "line_number": 6, "usage_type": "attribute"}, {"api_name": "Scripts.character", "line_number": 6, "usage_type": "name"}, {"api_name": "colorama.init", "line_number": 18, "usage_type": "call"}, {"api_name": "Scripts.inputManager.InputManager", "line_number": 27, "usage_type": "call"}, {"api_name": "Scripts.inputManager", "line_number": 27, "usage_type": "name"}, {"api_name": "colorama.Fore.BLUE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 32, "usage_type": "name"}, {"api_name": "colorama.Fore.RESET", "line_number": 32, "usage_type": "attribute"}, {"api_name": "colorama.Back.RESET", "line_number": 37, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "593314271", "text": "import numpy as np\nimport pandas as pd\nfrom scipy.interpolate import interpn\n\nfrom foxes.core import TurbineType\nfrom foxes.utils import PandasFileHelper\nfrom foxes.data import PCTCURVE, parse_Pct_two_files\nimport foxes.variables as FV\nimport foxes.constants as FC\n\n\nclass WsRho2PCtFromTwo(TurbineType):\n \"\"\"\n Calculate air density dependent power\n and ct values, as given by two individual\n files.\n\n The structure of each file is:\n ws,1.225,0.950,0.975,...,1.275\n\n The first column represents wind speed in m/s\n and the subsequent columns are air density values\n (not neccessarily in order).\n\n Attributes\n ----------\n source_P: str or pandas.DataFrame\n The file path for the power curve, static name, or data\n source_ct: str or pandas.DataFrame\n The file path for the ct curve, static name, or data\n WSCT: str\n The wind speed variable for ct lookup\n WSP: str\n The wind speed variable for power lookup\n rpars_P: dict, optional\n Parameters for pandas power file reading\n rpars_ct: dict, optional\n Parameters for pandas ct file reading\n ipars_P: dict, optional\n Parameters for scipy.interpolate.interpn()\n ipars_ct: dict, optional\n Parameters for scipy.interpolate.interpn()\n\n :group: models.turbine_types\n\n \"\"\"\n\n def __init__(\n self,\n data_source_P,\n data_source_ct,\n p_ct=1.0,\n p_P=1.88,\n var_ws_ct=FV.REWS2,\n var_ws_P=FV.REWS3,\n pd_file_read_pars_P={},\n 
pd_file_read_pars_ct={},\n interpn_pars_P=None,\n interpn_pars_ct=None,\n **parameters,\n ):\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n data_source_P: str or pandas.DataFrame\n The file path for the power curve, static name, or data\n data_source_ct: str or pandas.DataFrame\n The file path for the ct curve, static name, or data\n p_ct: float\n The exponent for yaw dependency of ct\n p_P: float\n The exponent for yaw dependency of P\n var_ws_ct: str\n The wind speed variable for ct lookup\n var_ws_P: str\n The wind speed variable for power lookup\n pd_file_read_pars_P: dict\n Parameters for pandas power file reading\n pd_file_read_pars_ct: dict\n Parameters for pandas ct file reading\n interpn_pars_P: dict, optional\n Parameters for scipy.interpolate.interpn()\n interpn_pars_ct: dict, optional\n Parameters for scipy.interpolate.interpn()\n parameters: dict, optional\n Additional parameters for TurbineType class\n\n \"\"\"\n if not isinstance(data_source_P, pd.DataFrame) or not isinstance(\n data_source_ct, pd.DataFrame\n ):\n pars = parse_Pct_two_files(data_source_P, data_source_ct)\n else:\n pars = parameters\n super().__init__(**pars)\n\n self.source_P = data_source_P\n self.source_ct = data_source_ct\n self.p_ct = p_ct\n self.p_P = p_P\n self.WSCT = var_ws_ct\n self.WSP = var_ws_P\n self.rpars_P = pd_file_read_pars_P\n self.rpars_ct = pd_file_read_pars_ct\n self.ipars_P = interpn_pars_P\n self.ipars_ct = interpn_pars_ct\n\n if self.ipars_P is None:\n self.ipars_P = dict(method=\"linear\", bounds_error=True, fill_value=0.0)\n if self.ipars_ct is None:\n self.ipars_ct = dict(method=\"linear\", bounds_error=True, fill_value=0.0)\n\n self._P = None\n self._ct = None\n\n def output_farm_vars(self, algo):\n \"\"\"\n The variables which are being modified by the model.\n\n Parameters\n ----------\n algo: foxes.core.Algorithm\n The calculation algorithm\n\n Returns\n -------\n output_vars: list of str\n The output variable names\n\n \"\"\"\n return [FV.P, FV.CT]\n\n def initialize(self, algo, verbosity=0):\n \"\"\"\n Initializes the model.\n\n This includes loading all required data from files. The model\n should return all array type data as part of the idata return\n dictionary (and not store it under self, for memory reasons). 
This\n data will then be chunked and provided as part of the mdata object\n during calculations.\n\n Parameters\n ----------\n algo: foxes.core.Algorithm\n The calculation algorithm\n verbosity: int\n The verbosity level, 0 = silent\n\n Returns\n -------\n idata: dict\n The dict has exactly two entries: `data_vars`,\n a dict with entries `name_str -> (dim_tuple, data_ndarray)`;\n and `coords`, a dict with entries `dim_name_str -> dim_array`\n\n \"\"\"\n # read power curve:\n if isinstance(self.source_P, pd.DataFrame):\n data = self.source_P\n else:\n fpath = algo.dbook.get_file_path(PCTCURVE, self.source_P, check_raw=True)\n pars = {\"index_col\": 0}\n pars.update(self.rpars_P)\n data = PandasFileHelper.read_file(fpath, **pars)\n\n data.sort_index(inplace=True)\n data.columns = data.columns.astype(FC.DTYPE)\n self._ws_P = data.index.to_numpy(FC.DTYPE)\n self._rho_P = np.sort(data.columns.to_numpy())\n self._P = data[self._rho_P].to_numpy(FC.DTYPE)\n\n # read ct curve:\n if isinstance(self.source_ct, pd.DataFrame):\n data = self.source_ct\n else:\n fpath = algo.dbook.get_file_path(PCTCURVE, self.source_ct, check_raw=True)\n pars = {\"index_col\": 0}\n pars.update(self.rpars_ct)\n data = PandasFileHelper.read_file(fpath, **pars)\n\n data.sort_index(inplace=True)\n data.columns = data.columns.astype(FC.DTYPE)\n self._ws_ct = data.index.to_numpy(FC.DTYPE)\n self._rho_ct = np.sort(data.columns.to_numpy())\n self._ct = data[self._rho_ct].to_numpy(FC.DTYPE)\n\n return super().initialize(algo, verbosity)\n\n def _bounds_info(self, target, qts):\n \"\"\"Helper function for printing bounds info\"\"\"\n\n print(f\"\\nBOUNDS INFO FOR TARGET {target}\")\n WS = self.WSP if target == FV.P else self.WSCT\n ws = self._ws_P if target == FV.P else self._ws_ct\n rho = self._rho_P if target == FV.P else self._rho_ct\n print(f\" {WS}: min = {np.min(ws):.4f}, max = {np.max(ws):.4f}\")\n print(f\" {FV.RHO}: min = {np.min(rho):.4f}, max = {np.max(rho):.4f}\")\n\n print(f\"DATA INFO FOR TARGET {target}\")\n ws = qts[:, 0]\n rho = qts[:, 1]\n print(f\" {WS}: min = {np.min(ws):.4f}, max = {np.max(ws):.4f}\")\n print(f\" {FV.RHO}: min = {np.min(rho):.4f}, max = {np.max(rho):.4f}\")\n print()\n\n def calculate(self, algo, mdata, fdata, st_sel):\n \"\"\" \"\n The main model calculation.\n\n This function is executed on a single chunk of data,\n all computations should be based on numpy arrays.\n\n Parameters\n ----------\n algo: foxes.core.Algorithm\n The calculation algorithm\n mdata: foxes.core.Data\n The model data\n fdata: foxes.core.Data\n The farm data\n st_sel: numpy.ndarray of bool\n The state-turbine selection,\n shape: (n_states, n_turbines)\n\n Returns\n -------\n results: dict\n The resulting data, keys: output variable str.\n Values: numpy.ndarray with shape (n_states, n_turbines)\n\n \"\"\"\n\n # calculate P:\n st_sel_P = (\n st_sel\n & (fdata[self.WSP] >= self._ws_P[0])\n & (fdata[self.WSP] <= self._ws_P[-1])\n )\n st_sel_P0 = st_sel & ~st_sel_P\n if np.any(st_sel_P0):\n fdata[FV.P][st_sel_P0] = 0\n if np.any(st_sel_P):\n # prepare interpolation:\n n_sel = np.sum(st_sel_P)\n qts = np.zeros((n_sel, 2), dtype=FC.DTYPE) # ws, rho\n qts[:, 0] = fdata[self.WSP][st_sel_P]\n qts[:, 1] = fdata[FV.RHO][st_sel_P]\n\n # apply yaw corrections:\n if FV.YAWM in fdata and self.p_P is not None:\n # calculate corrected wind speed wsc,\n # gives ws**3 * cos**p_P in partial load region\n # and smoothly deals with full load region:\n yawm = fdata[FV.YAWM][st_sel_P]\n if np.any(np.isnan(yawm)):\n raise ValueError(\n 
f\"{self.name}: Found NaN values for variable '{FV.YAWM}'. Maybe change order in turbine_models?\"\n )\n cosm = np.cos(yawm / 180 * np.pi)\n qts[:, 0] *= (cosm**self.p_P) ** (1.0 / 3.0)\n del yawm, cosm\n\n # run interpolation:\n try:\n fdata[FV.P][st_sel_P] = interpn(\n (self._ws_P, self._rho_P), self._P, qts, **self.ipars_P\n )\n except ValueError as e:\n self._bounds_info(FV.P, qts)\n raise e\n del st_sel_P, st_sel_P0\n\n # calculate ct:\n st_sel_ct = (\n st_sel\n & (fdata[self.WSCT] >= self._ws_P[0])\n & (fdata[self.WSCT] <= self._ws_P[-1])\n )\n st_sel_ct0 = st_sel & ~st_sel_ct\n if np.any(st_sel_ct0):\n fdata[FV.CT][st_sel_ct0] = 0\n if np.any(st_sel_ct):\n # prepare interpolation:\n n_sel = np.sum(st_sel_ct)\n qts = np.zeros((n_sel, 2), dtype=FC.DTYPE) # ws, rho\n qts[:, 0] = fdata[self.WSP][st_sel_ct]\n qts[:, 1] = fdata[FV.RHO][st_sel_ct]\n\n # apply yaw corrections:\n if FV.YAWM in fdata and self.p_ct is not None:\n # calculate corrected wind speed wsc,\n # gives ws**3 * cos**p_P in partial load region\n # and smoothly deals with full load region:\n yawm = fdata[FV.YAWM][st_sel_ct]\n if np.any(np.isnan(yawm)):\n raise ValueError(\n f\"{self.name}: Found NaN values for variable '{FV.YAWM}'. Maybe change order in turbine_models?\"\n )\n cosm = np.cos(yawm / 180 * np.pi)\n qts[:, 0] *= (cosm**self.p_ct) ** 0.5\n del yawm, cosm\n\n # run interpolation:\n try:\n fdata[FV.CT][st_sel_ct] = interpn(\n (self._ws_ct, self._rho_ct), self._ct, qts, **self.ipars_ct\n )\n except ValueError as e:\n self._bounds_info(FV.CT, qts)\n raise e\n\n return {v: fdata[v] for v in self.output_farm_vars(algo)}\n\n def finalize(self, algo, verbosity=0):\n \"\"\"\n Finalizes the model.\n\n Parameters\n ----------\n algo: foxes.core.Algorithm\n The calculation algorithm\n verbosity: int\n The verbosity level\n\n \"\"\"\n del self._ws_P, self._rho_P, self._ws_ct, self._rho_ct\n self._P = None\n self._ct = None\n super().finalize(algo, verbosity)\n", "sub_path": "foxes/models/turbine_types/wsrho2PCt_from_two.py", "file_name": "wsrho2PCt_from_two.py", "file_ext": "py", "file_size_in_byte": 11215, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "foxes.core.TurbineType", "line_number": 12, "usage_type": "name"}, {"api_name": "foxes.variables.REWS2", "line_number": 54, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 54, "usage_type": "name"}, {"api_name": "foxes.variables.REWS3", "line_number": 55, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 55, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 92, "usage_type": "attribute"}, {"api_name": "foxes.data.parse_Pct_two_files", "line_number": 94, "usage_type": "call"}, {"api_name": "foxes.variables.P", "line_number": 133, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 133, "usage_type": "name"}, {"api_name": "foxes.variables.CT", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 161, "usage_type": "attribute"}, {"api_name": "foxes.data.PCTCURVE", "line_number": 164, "usage_type": "argument"}, {"api_name": "foxes.utils.PandasFileHelper.read_file", "line_number": 167, "usage_type": "call"}, {"api_name": "foxes.utils.PandasFileHelper", "line_number": 167, "usage_type": "name"}, {"api_name": "foxes.constants.DTYPE", "line_number": 170, "usage_type": "attribute"}, 
{"api_name": "foxes.constants", "line_number": 170, "usage_type": "name"}, {"api_name": "foxes.constants.DTYPE", "line_number": 171, "usage_type": "attribute"}, {"api_name": "foxes.constants", "line_number": 171, "usage_type": "name"}, {"api_name": "numpy.sort", "line_number": 172, "usage_type": "call"}, {"api_name": "foxes.constants.DTYPE", "line_number": 173, "usage_type": "attribute"}, {"api_name": "foxes.constants", "line_number": 173, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 176, "usage_type": "attribute"}, {"api_name": "foxes.data.PCTCURVE", "line_number": 179, "usage_type": "argument"}, {"api_name": "foxes.utils.PandasFileHelper.read_file", "line_number": 182, "usage_type": "call"}, {"api_name": "foxes.utils.PandasFileHelper", "line_number": 182, "usage_type": "name"}, {"api_name": "foxes.constants.DTYPE", "line_number": 185, "usage_type": "attribute"}, {"api_name": "foxes.constants", "line_number": 185, "usage_type": "name"}, {"api_name": "foxes.constants.DTYPE", "line_number": 186, "usage_type": "attribute"}, {"api_name": "foxes.constants", "line_number": 186, "usage_type": "name"}, {"api_name": "numpy.sort", "line_number": 187, "usage_type": "call"}, {"api_name": "foxes.constants.DTYPE", "line_number": 188, "usage_type": "attribute"}, {"api_name": "foxes.constants", "line_number": 188, "usage_type": "name"}, {"api_name": "foxes.variables.P", "line_number": 196, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 196, "usage_type": "name"}, {"api_name": "foxes.variables.P", "line_number": 197, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 197, "usage_type": "name"}, {"api_name": "foxes.variables.P", "line_number": 198, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 198, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 199, "usage_type": "call"}, {"api_name": "foxes.variables.RHO", "line_number": 200, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 200, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 205, "usage_type": "call"}, {"api_name": "foxes.variables.RHO", "line_number": 206, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 206, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 243, "usage_type": "call"}, {"api_name": "foxes.variables.P", "line_number": 244, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 244, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 248, "usage_type": "call"}, {"api_name": "foxes.constants.DTYPE", "line_number": 248, "usage_type": "attribute"}, {"api_name": "foxes.constants", "line_number": 248, "usage_type": "name"}, {"api_name": "foxes.variables.RHO", "line_number": 250, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 250, "usage_type": "name"}, {"api_name": "foxes.variables.YAWM", "line_number": 253, "usage_type": "attribute"}, 
{"api_name": "foxes.variables", "line_number": 253, "usage_type": "name"}, {"api_name": "foxes.variables.YAWM", "line_number": 257, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 257, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 258, "usage_type": "call"}, {"api_name": "foxes.variables.YAWM", "line_number": 260, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 260, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 262, "usage_type": "attribute"}, {"api_name": "foxes.variables.P", "line_number": 268, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 268, "usage_type": "name"}, {"api_name": "scipy.interpolate.interpn", "line_number": 268, "usage_type": "call"}, {"api_name": "foxes.variables.P", "line_number": 272, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 272, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 283, "usage_type": "call"}, {"api_name": "foxes.variables.CT", "line_number": 284, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 284, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 288, "usage_type": "call"}, {"api_name": "foxes.constants.DTYPE", "line_number": 288, "usage_type": "attribute"}, {"api_name": "foxes.constants", "line_number": 288, "usage_type": "name"}, {"api_name": "foxes.variables.RHO", "line_number": 290, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 290, "usage_type": "name"}, {"api_name": "foxes.variables.YAWM", "line_number": 293, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 293, "usage_type": "name"}, {"api_name": "foxes.variables.YAWM", "line_number": 297, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 297, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 298, "usage_type": "call"}, {"api_name": "foxes.variables.YAWM", "line_number": 300, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 300, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 302, "usage_type": "attribute"}, {"api_name": "foxes.variables.CT", "line_number": 308, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 308, "usage_type": "name"}, {"api_name": "scipy.interpolate.interpn", "line_number": 308, "usage_type": "call"}, {"api_name": "foxes.variables.CT", "line_number": 312, "usage_type": "attribute"}, {"api_name": "foxes.variables", "line_number": 312, "usage_type": "name"}]} +{"seq_id": "543813996", "text": "import s3\r\nimport botocore\r\nfrom s3exception import *\r\n\r\nclass s3service:\r\n s3 = None\r\n config = None\r\n\r\n def __init__(self, s3, config):\r\n self.s3 = s3\r\n self.config = config\r\n\r\n def uploadObject(self, key, objectData, bucketName=None, metaData={}):\r\n if bucketName == None:\r\n bucketName = self.config.bucketName\r\n\r\n s3Connection = self.s3.getConnection(self.config.endpointUrl)\r\n try:\r\n s3Connection.Bucket(bucketName).put_object(Key=key, Body=objectData, Metadata=metaData)\r\n return 
True\r\n except Exception:\r\n return False\r\n\r\n def getObject(self, key, bucketName=None):\r\n if bucketName == None:\r\n bucketName = self.config.bucketName\r\n\r\n s3Connection = self.s3.getConnection(self.config.endpointUrl)\r\n if not self.doesObjectExist(key, bucketName, s3Connection):\r\n return { 'Body': None }\r\n else:\r\n return s3Connection.Object(bucketName, key).get()\r\n\r\n def doesObjectExist(self, key, bucketName, s3Connection=None):\r\n if s3Connection == None:\r\n s3Connection = self.s3.getConnection(self.config.endpointUrl)\r\n try:\r\n s3Connection.Object(bucketName, key).load()\r\n return True\r\n except botocore.exceptions.ClientError as e:\r\n return False\r\n\r\n def doesBucketExist(self, bucketName):\r\n s3Connection = self.s3.getConnection()\r\n try:\r\n s3Connection.meta.client.head_bucket(Bucket=bucketName)\r\n return True\r\n except botocore.exceptions.ClientError as error:\r\n statusCode = int(error.response['Error']['Code'])\r\n if statusCode == 403:\r\n raise s3exception(403, \"Forbidden\", bucketName)\r\n if statusCode == 404:\r\n raise s3exception(404, \"Not Found\", bucketName)\r\n", "sub_path": "python/s3_helper/s3service.py", "file_name": "s3service.py", "file_ext": "py", "file_size_in_byte": 1891, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "botocore.exceptions", "line_number": 40, "usage_type": "attribute"}, {"api_name": "botocore.exceptions", "line_number": 48, "usage_type": "attribute"}]} +{"seq_id": "346282242", "text": "from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom accounts.models import Profile\n\n\n# Create your models here.\n\nclass Post(models.Model):\n post_image = models.ImageField(upload_to='projects_pics/')\n title = models.CharField(max_length=100)\n content = models.TextField()\n url = models.CharField(max_length=100)\n date_posted = models.DateTimeField(default=timezone.now)\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('post-detail', kwargs={'pk': self.pk})\n\n\nclass Rating(models.Model):\n design = models.IntegerField(blank=True, default=0)\n usability = models.IntegerField(blank=True, default=0)\n content = models.IntegerField(blank=True, default=0)\n average_score = models.IntegerField(blank=True, default=0)\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n profile = models.ForeignKey(Profile, on_delete=models.CASCADE)\n", "sub_path": "projects/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.db.models.Model", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 14, 
"usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.urls.reverse", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 31, "usage_type": "call"}, {"api_name": "accounts.models.Profile", "line_number": 31, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "581089410", "text": "__author__ = 'tom'\nfrom django.db import connection\n\n\n\n\n\ndef select_sql(sql):\n cursor = connection.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n cursor.close()\n return result\n\n\ndef Article_sql(user_id):\n sql = '''\n select a.title,a.date,a.content,\n b.who_comment,b.content,b.comment_date,\n c.who_revert,c.content,c.revert_date\n from Article a left join Comment b on a.id=b.article_id\n left join Revert c on b.id=c.comment_id\n where a.user_id=%s\n\n ''' % user_id\n\n article_list = select_sql(sql)\n list_name = [\n 'title', 'date', 'A_content',\n 'who_comment', 'C_content', 'comment_date',\n 'who_revert', 'R_content', 'revert_date',\n ]\n\n result = [dict(zip(list_name, _)) for _ in article_list]\n return result", "sub_path": "XM_new/XM/apis.py", "file_name": "apis.py", "file_ext": "py", "file_size_in_byte": 915, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.db.connection.cursor", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "142570785", "text": "import os\nimport pytest\nimport sys\nimport grpc\nfrom helpers.cluster import ClickHouseCluster, 
run_and_check\n\n# The test cluster is configured with a certificate for that host name, see 'server-ext.cnf'.\n# The client has to verify the server certificate against that name. The client uses SNI\nSSL_HOST = \"integration-tests.clickhouse.com\"\nGRPC_PORT = 9100\nSCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))\nDEFAULT_ENCODING = \"utf-8\"\n\n\n# Use grpcio-tools to generate *pb2.py files from *.proto.\n\nproto_dir = os.path.join(SCRIPT_DIR, \"./protos\")\ngen_dir = os.path.join(SCRIPT_DIR, \"./_gen\")\nos.makedirs(gen_dir, exist_ok=True)\nrun_and_check(\n \"python3 -m grpc_tools.protoc -I{proto_dir} --python_out={gen_dir} --grpc_python_out={gen_dir} \\\n {proto_dir}/clickhouse_grpc.proto\".format(\n proto_dir=proto_dir, gen_dir=gen_dir\n ),\n shell=True,\n)\n\nsys.path.append(gen_dir)\nimport clickhouse_grpc_pb2\nimport clickhouse_grpc_pb2_grpc\n\n\n# Utilities\n\nconfig_dir = os.path.join(SCRIPT_DIR, \"./configs\")\ncluster = ClickHouseCluster(__file__)\nnode = cluster.add_instance(\n \"node\",\n main_configs=[\n \"configs/grpc_config.xml\",\n \"configs/server-key.pem\",\n \"configs/server-cert.pem\",\n \"configs/ca-cert.pem\",\n ],\n # Bug in TSAN reproduces in this test https://github.com/grpc/grpc/issues/29550#issuecomment-1188085387\n env_variables={\n \"TSAN_OPTIONS\": \"report_atomic_races=0 \" + os.getenv(\"TSAN_OPTIONS\", default=\"\")\n },\n)\n\n\ndef get_grpc_url(instance=node):\n return f\"{instance.ip_address}:{GRPC_PORT}\"\n\n\ndef create_secure_channel():\n ca_cert = open(os.path.join(config_dir, \"ca-cert.pem\"), \"rb\").read()\n client_key = open(os.path.join(config_dir, \"client-key.pem\"), \"rb\").read()\n client_cert = open(os.path.join(config_dir, \"client-cert.pem\"), \"rb\").read()\n credentials = grpc.ssl_channel_credentials(ca_cert, client_key, client_cert)\n channel = grpc.secure_channel(\n get_grpc_url(),\n credentials,\n options=((\"grpc.ssl_target_name_override\", SSL_HOST),),\n )\n grpc.channel_ready_future(channel).result(timeout=10)\n return channel\n\n\ndef create_insecure_channel():\n channel = grpc.insecure_channel(get_grpc_url())\n grpc.channel_ready_future(channel).result(timeout=2)\n return channel\n\n\ndef create_secure_channel_with_wrong_client_certificate():\n ca_cert = open(os.path.join(config_dir, \"ca-cert.pem\"), \"rb\").read()\n client_key = open(os.path.join(config_dir, \"wrong-client-key.pem\"), \"rb\").read()\n client_cert = open(os.path.join(config_dir, \"wrong-client-cert.pem\"), \"rb\").read()\n credentials = grpc.ssl_channel_credentials(ca_cert, client_key, client_cert)\n channel = grpc.secure_channel(get_grpc_url(), credentials)\n grpc.channel_ready_future(channel).result(timeout=2)\n return channel\n\n\ndef query(query_text, channel):\n query_info = clickhouse_grpc_pb2.QueryInfo(query=query_text)\n stub = clickhouse_grpc_pb2_grpc.ClickHouseStub(channel)\n result = stub.ExecuteQuery(query_info)\n if result and result.HasField(\"exception\"):\n raise Exception(result.exception.display_text)\n return result.output.decode(DEFAULT_ENCODING)\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef start_cluster():\n cluster.start()\n try:\n yield cluster\n\n finally:\n cluster.shutdown()\n\n\n# Actual tests\n\n\ndef test_secure_channel():\n with create_secure_channel() as channel:\n assert query(\"SELECT 'ok'\", channel) == \"ok\\n\"\n\n\ndef test_insecure_channel():\n with pytest.raises(grpc.FutureTimeoutError):\n with create_insecure_channel() as channel:\n query(\"SELECT 'ok'\", channel)\n\n\ndef test_wrong_client_certificate():\n 
with pytest.raises(grpc.FutureTimeoutError):\n with create_secure_channel_with_wrong_client_certificate() as channel:\n query(\"SELECT 'ok'\", channel)\n", "sub_path": "tests/integration/test_grpc_protocol_ssl/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 3830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 19, "usage_type": "call"}, {"api_name": "helpers.cluster.run_and_check", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "helpers.cluster.ClickHouseCluster", "line_number": 36, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "grpc.ssl_channel_credentials", "line_number": 60, "usage_type": "call"}, {"api_name": "grpc.secure_channel", "line_number": 61, "usage_type": "call"}, {"api_name": "grpc.channel_ready_future", "line_number": 66, "usage_type": "call"}, {"api_name": "grpc.insecure_channel", "line_number": 71, "usage_type": "call"}, {"api_name": "grpc.channel_ready_future", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "grpc.ssl_channel_credentials", "line_number": 80, "usage_type": "call"}, {"api_name": "grpc.secure_channel", "line_number": 81, "usage_type": "call"}, {"api_name": "grpc.channel_ready_future", "line_number": 82, "usage_type": "call"}, {"api_name": "clickhouse_grpc_pb2.QueryInfo", "line_number": 87, "usage_type": "call"}, {"api_name": "clickhouse_grpc_pb2_grpc.ClickHouseStub", "line_number": 88, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 95, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 114, "usage_type": "call"}, {"api_name": "grpc.FutureTimeoutError", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 120, "usage_type": "call"}, {"api_name": "grpc.FutureTimeoutError", "line_number": 120, 
"usage_type": "attribute"}]} +{"seq_id": "412706987", "text": "import logging\nimport configparser\nfrom Bot import TelegramBot\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.DEBUG\n)\nlogger = logging.getLogger(__name__)\n\nif __name__ == '__main__':\n config = configparser.ConfigParser()\n try:\n config.read(\"../config/settings.cfg\")\n telegram_token = config[\"telegram\"][\"token\"]\n openrouteservice_apikey = config[\"openrouteservice\"][\"apikey\"]\n openweathermap_apikey = config[\"openweathermap\"][\"apikey\"]\n except:\n config.read(\"config/settings.cfg\")\n telegram_token = config[\"telegram\"][\"token\"]\n openrouteservice_apikey = config[\"openrouteservice\"][\"apikey\"]\n openweathermap_apikey = config[\"openweathermap\"][\"apikey\"]\n\n telegram_bot = TelegramBot(\n telegram_token=telegram_token,\n openrouteservice_apikey=openrouteservice_apikey,\n openweathermap_apikey=openweathermap_apikey,\n )\n telegram_bot.run_bot()\n", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 991, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.basicConfig", "line_number": 5, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 7, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 12, "usage_type": "call"}, {"api_name": "Bot.TelegramBot", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "344673369", "text": "import sys\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\nsetup(\n name='anybase32',\n version='1.1.0',\n packages=find_packages(exclude=[\"tests\"]),\n url='https://github.com/alanblevins/anybase32',\n download_url='https://github.com/alanblevins/anybase32/tarball/1.1.0',\n license='MIT',\n author='Alan Blevins',\n author_email='alan.blevins@gmail.com',\n description='Encode and decode base32 data using arbitrary alphabets',\n keywords=['base32', 'encode', 'decode'],\n tests_require=['pytest'],\n cmdclass={'test': PyTest},\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1135, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "setuptools.command.test.test", "line_number": 6, "usage_type": "name"}, {"api_name": "setuptools.command.test.test.initialize_options", "line_number": 10, "usage_type": "call"}, {"api_name": "setuptools.command.test.test", "line_number": 10, "usage_type": "name"}, {"api_name": "setuptools.command.test.test.finalize_options", "line_number": 14, "usage_type": "call"}, {"api_name": "setuptools.command.test.test", "line_number": 14, "usage_type": "name"}, {"api_name": "pytest.main", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 22, "usage_type": "call"}, {"api_name": "setuptools.setup", 
"line_number": 24, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "256459307", "text": "import re\nfrom fractions import Fraction\n\ndef get_int_from_string(s):\n return int(\"\".join([i for i in s if i.isdigit()]))\n\ndef get_numless_field_name(s):\n # Returns \"ingredient_quantity\" given \"ingredient_quantity_4\"\n numless_string = \"\".join([i for i in s if not i.isdigit()])\n return numless_string[:-1]\n\ndef get_next_week_and_year(current_week_number, current_year):\n \"\"\"\n Given current week number and year returns a list of next week number and year.\n (52, 2017) returns [1, 2018]\n \"\"\"\n if current_week_number == 52:\n current_week_number = 0\n current_year += 1\n return [current_week_number + 1, current_year]\n\ndef sort_recipe_ingredients_by_aisle(recipe_ingredients):\n \"\"\"\n Given a list of RecipeIngredient models, returns list of dictionaries where\n ingredients are sorted by their grocery aisle.\n [\n {'aisle': aisle_object, 'recipe_ingredients' = [recipe_ingredient_object, recipe_ingredient_object]},\n {'aisle': aisle_object, 'recipe_ingredients': [recipe_ingredient_object]}\n ]\n \"\"\"\n sorted_ingredients = []\n for recipe_ingredient in recipe_ingredients:\n already_sorted = False\n for aisle_ingredients in sorted_ingredients:\n if not already_sorted:\n if aisle_ingredients['aisle'] == recipe_ingredient.ingredient.grocery_aisle:\n aisle_ingredients['recipe_ingredients'].append(recipe_ingredient)\n already_sorted = True\n if not already_sorted:\n sorted_ingredients.append({\n 'aisle': recipe_ingredient.ingredient.grocery_aisle,\n 'recipe_ingredients': [recipe_ingredient]\n }\n )\n return sorted_ingredients\n\ndef strip_trailing_zeros(s): # Given 3.00 returns '3'.\n return ('%f' % s).rstrip('0').rstrip('.')\n\ndef get_fraction_str_from_int(i): # Given 2.5 returns \"2 1/2\".\n result = \"\"\n if int(i): # Integer place (\"2\")\n result += str(int(i))\n if i % 1: # Decimal place (.5 --> \"1/2\")\n if int(i): # Adds space between both places if present. 
# \"2 1/2\"\n result += \" \"\n result += str(Fraction(i % 1))\n return result\n\ndef strip_http_www_from_url_string(s):\n r = re.search('^https?:\\/\\/(?:w{3}.)?(.*)\\/?$', s)\n if r:\n return r.group(1)\n return s", "sub_path": "grocery_list/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "fractions.Fraction", "line_number": 57, "usage_type": "call"}, {"api_name": "re.search", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "129993128", "text": "# -*- coding: utf-8 -*-\n'''\nweather_demo.py通过调用免费天气接口GET https://www.tianqiapi.com/api/ \n获取实时天气信息,然后提取出关键信息调用PIL库生成图片保存在本地\n'''\nimport requests\nimport json\nfrom PIL import Image,ImageDraw,ImageFont\n\nclass Weather(object):\n \n def __init__(self):\n self.url = 'https://www.tianqiapi.com/api/'\n\n def get_page(self,params):\n res = requests.get(self.url,params=params)\n res.encoding = 'utf-8'\n source = res.text\n self.parse_page(source)\n\n def parse_page(self,source):\n wea_dic = json.loads(source)\n d = {\n 'date':wea_dic['date'],\n 'city':wea_dic['city'],\n 'wea_img':wea_dic['wea_img'],\n 'wea':wea_dic['wea'],\n 'tem':wea_dic['tem'],\n 'humidity':wea_dic['humidity'],\n 'air_level':wea_dic['air_level'],\n 'air_tips':wea_dic['air_tips'],\n }\n self.write_file(d)\n \n def write_file(self,d):\n # 创建绘画对象\n im = Image.new(\"RGB\",(380,220),'#40E0D0')\n # 初始化画笔\n draw = ImageDraw.Draw(im)\n # 字体颜色\n font = ImageFont.truetype('/home/tarena/wea_project/weather/simhei.ttf',20)\n # 写字符\n draw.text((5,10),'日期:{0} 城市:{1}'.format(d['date'],d['city']),font=font,fill='#000')\n draw.text((5,40),'温度:{0}°C 湿度:{1}'.format(d['tem'],d['humidity']),font=font,fill='#000')\n draw.text((5,70),'空气质量等级:{}'.format(d['air_level']),font=font,fill='#000')\n draw.text((175,70),'天气:{}'.format(d['wea']),font=font,fill='#000')\n draw.text((5,110),'空气质量描述:{}'.format(d['air_tips'])[:18],font=font,fill='#000')\n draw.text((5,140),' {}'.format(d['air_tips'])[18:36],font=font,fill='#000')\n draw.text((5,170),' {}'.format(d['air_tips'])[36:],font=font,fill='#000')\n # 添加图片\n png_path = \"/home/tarena/wea_project/weather/apple/{}\".format(d['wea_img'])\n mark = Image.open(png_path)\n im.paste(mark,(300,10))\n # 回收画笔\n del draw\n # 保存图片到内存中\n im.save('wea.png')\n\n def work_on(self,id):\n params = {\n 'version':'v6',\n 'cityid':id\n }\n self.get_page(params)\n\nif __name__ == '__main__':\n s = input('请输入要查询的城市名:')\n with open(\"/home/tarena/wea_project/weather/weather_id.json\") as f:\n # 读取json文件内容\n city_list = json.load(f)\n # 关闭json文件\n f.close()\n # 根据城市名获取城市id\n for i in city_list:\n if s == i['cityZh'] or s == i['provinceZh']:\n city_id = i['id']\n break \n id = city_id\n spider = Weather()\n spider.work_on(id)", "sub_path": "weather/weather_demo.py", "file_name": "weather_demo.py", "file_ext": "py", "file_size_in_byte": 2873, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 37, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 39, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 41, 
"usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 41, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 52, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 52, "usage_type": "name"}, {"api_name": "json.load", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "1974517", "text": "from django.conf.urls import url\nfrom django.contrib import admin\nfrom base.views import index, sign_up, sign_in, log_out, radio, homepage\nfrom django.contrib.auth import views\nfrom django_douban import settings\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', index),\n url(r'^signup/', sign_up, name='signup'),\n url(r'^login/', sign_in, name='login'),\n url(r'^logout/', log_out, name='logout'),\n url(r'^radio/', radio, name='radio'),\n url(r'^people/(?P[a-zA-Z0-9_]+)/', homepage, name='homepage')\n]\n\nif settings.DEBUG:\n # static files (images, css, javascript, etc.)\n urlpatterns += [\n url(r'^media/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT})]\n", "sub_path": "django_douban/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 754, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "base.views.index", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "base.views.sign_up", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "base.views.sign_in", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "base.views.log_out", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "base.views.radio", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "base.views.homepage", "line_number": 14, "usage_type": "argument"}, {"api_name": "django_douban.settings.DEBUG", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django_douban.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django_douban.settings.MEDIA_ROOT", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django_douban.settings", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "438435407", "text": "###\r\n#\r\n# Word2Vec utility functions\r\n#\r\n###\r\n\r\nimport gensim.downloader as api\r\nimport csv\r\nimport numpy as np\r\n\r\ndef get_word_vector(word, model):\r\n try:\r\n model[word]\r\n except:\r\n #print('Word not found')\r\n return False\r\n return model[word]\r\n \r\n \r\ndef word2vec_with_vocab(vocab_file, save_file, save_npy_file):\r\n \r\n model = api.load(\"word2vec-google-news-300\")\r\n \r\n missing_words = 0\r\n \r\n with open(vocab_file,'r') as v, \\\r\n open(save_file, 'w+') as f:\r\n \r\n word_vec_dim = 300\r\n \r\n vocab_data = csv.reader(v)\r\n wr = csv.writer(f)\r\n \r\n # i = 0\r\n 
\r\n word_vec_arr = []\r\n word_vectors = []\r\n \r\n for row in vocab_data:\r\n word_vec = get_word_vector(row[0], model)\r\n \r\n if isinstance(word_vec, bool):\r\n missing_words += 1\r\n # print(row[0])\r\n word_vec = [0] * word_vec_dim\r\n \r\n vec = [d for d in word_vec]\r\n temp = [row[0]] + vec\r\n word_vec_arr.append(temp)\r\n word_vectors.append(vec)\r\n \r\n # i += 1\r\n # if i > 100: break\r\n \r\n wr.writerows(word_vec_arr)\r\n np.save(save_npy_file, word_vectors)\r\n \r\n print('Missing %d words' % (missing_words))\r\n ", "sub_path": "utils/word2vec.py", "file_name": "word2vec.py", "file_ext": "py", "file_size_in_byte": 1382, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "gensim.downloader.load", "line_number": 22, "usage_type": "call"}, {"api_name": "gensim.downloader", "line_number": 22, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 31, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "250527537", "text": "#! /usr/bin/env python\n# coding: utf-8\n\nimport os\nimport json\nfrom JYTools.JYWorker import RedisWorker\n\n\nclass Vcf2mafWorker(RedisWorker):\n def handle_task(self, key, params):\n self.task_log(\"Start Task\", key)\n self.task_log(\"get params: %s\" % json.dumps(params))\n\n # Get the parameters\n vcf_path = params[\"vcf_path\"]\n tumor_id, normal_id = params[\"tumor_id\"], params[\"normal_id\"]\n sample_no = params[\"sample_no\"]\n output_dir = os.path.join(out_dir, sample_no)\n\n if not vcf_path.endswith(\".vcf\"):\n self.set_current_task_error(\"input_vcf_path needs postfix .vcf\")\n\n if not os.path.isdir(output_dir):\n try:\n os.system(\"mkdir -p %s\" % output_dir)\n except:\n self.set_current_task_error(\"%s needs to be a dir\" % output_dir)\n # get out_maf name\n maf_name = os.path.split(vcf_path)[-1].replace(\".vcf\", \".maf\")\n out_maf = os.path.join(output_dir, maf_name)\n os.system(\n \"/bin/bash {run_vcf2maf_program} {input_vcf_path} {tumor_id} {normal_id} {out_maf}\".format(\n run_vcf2maf_program=run_vcf2maf_program, input_vcf_path=vcf_path, tumor_id=tumor_id,\n normal_id=normal_id, out_maf=out_maf))\n out_info = dict()\n out_info[\"vep_vcf\"] = os.path.join(output_dir, \"%s.vep.vcf\" % vcf_path[:-4])\n out_info[\"out_maf\"] = out_maf\n self.set_multi_output(**out_info)\n self.task_log(\"finished\")\n\nhelp_message = \"\"\"\n optional arguments:\n -h, --help show this help message and exit\n -p, --program-path Run Vcf2maf Program Path\n -o, --output DATA Output dir\n -D, --daemon Background the Vcf2mafWorker process. [False]\n -b STRING, --heartbeat-value STRING heartbeat value. 
[uuid.uuid4().hex]\n \"\"\"\n\nif __name__ == \"__main__\":\n Vcf2mafWorker.init_parser.add_argument(\"-p\", \"--program-path\", dest=\"run_vcf2maf_program\",\n help=\"run vcf2maf program path\")\n Vcf2mafWorker.init_parser.add_argument(\"-o\", \"--output\", dest=\"output_dir\", help=\"output dir\")\n args = Vcf2mafWorker.parse_args()\n work_tag = args.work_tag if args.work_tag is not None else \"Vcf2maf\"\n run_vcf2maf_program = args.run_vcf2maf_program\n out_dir = args.output_dir\n\n app = Vcf2mafWorker(work_tag=work_tag, conf_path=args.conf_path, heartbeat_value=args.heartbeat_value,\n log_dir=args.log_dir)\n if args.example_path is not None:\n s, o = app.test(key=args.key, params_path=args.example_path, sub_key=args.sub_key,\n report_tag=args.report_tag)\n else:\n app.work(args.daemon)\n", "sub_path": "anzhen/branch_pipeline/Vcf2mafWorker.py", "file_name": "Vcf2mafWorker.py", "file_ext": "py", "file_size_in_byte": 2703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "JYTools.JYWorker.RedisWorker", "line_number": 9, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "163641307", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2020/3/17 15:39\n@Author : WangHuan\n@Contact : hi_chengzi@126.com\n@File : 1.6.py\n@Software: PyCharm\n@description: map one key in a dict to multiple values\n\"\"\"\n\nfrom collections import defaultdict\n\n\nd = defaultdict(list)\nd['a'].append(1)\nd['a'].append(2)\nd['a'].append(3)\nfor key in d.keys():\n print(key, d[key])\n # a [1, 2, 3]\n\nd = defaultdict(set)\nd['a'].add(1)\nd['a'].add(2)\nd['b'].add(4)\nfor key in d.keys():\n print(key, d[key])\n # a {1, 2} b {4}\n\npairs = [('a',1), ('a',2), ('a',3), ('b',1)]\nd = defaultdict(list)\nfor key, value in pairs:\n d[key].append(value)\nprint(d)\n# defaultdict(<class 'list'>, {'a': [1, 2, 3], 'b': [1]})\n\n'''\nA defaultdict returns a new dict-like object:\n- defaultdict(list) turns a sequence of (key, value) pairs into a dict of (key, list) entries\n- keys that do not exist yet are created automatically\n- once the value is a list or set, append/add can be used\n'''", "sub_path": "python3-cookbook/Chapter1/1.6.py", "file_name": "1.6.py", "file_ext": "py", "file_size_in_byte": 947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "collections.defaultdict", "line_number": 14, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 22, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "107568046", "text": "import torch\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass GraphAttentionLayer(nn.Module):\n def __init__(self, in_features, 
out_features, dropout, alpha, concat=True):\n super(GraphAttentionLayer, self).__init__()\n self.dropout = dropout\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n\n self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))\n nn.init.xavier_uniform_(self.W.data, gain=1.414)\n self.a = nn.Parameter(torch.empty(size=(2*out_features, 1)))\n nn.init.xavier_uniform_(self.a.data, gain=1.414)\n\n self.leakrelu = nn.LeakyReLU(self.alpha)\n\n def forward(self, h, adj):\n Wh = torch.matmul(h, self.W)\n a_input = self._prepare_attention_mechanism_input(Wh)\n e = self.leakrelu(torch.matmul(a_input, self.a).squeeze(2))\n zero_vec = -9e15*torch.ones_like(e)\n attention = torch.where(adj > 0, e, zero_vec)\n attention = F.softmax(attention, dim=1)\n attention = F.dropout(attention, self.dropout, training=self.training)\n h_prime = torch.matmul(attention, Wh)\n\n if self.concat:\n return F.elu(h_prime)\n else:\n return h_prime\n\n def _prepare_attention_mechanism_input(self, Wh):\n N = Wh.shape[0]\n\n Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=0)\n Wh_repeated_alternating = Wh.repeat(N, 1)\n\n all_combinations_matrix = torch.cat([Wh_repeated_in_chunks, Wh_repeated_alternating], dim=1)\n return all_combinations_matrix.view(N, N, 2 * self.out_features)\n", "sub_path": "myGAT/layers.py", "file_name": "layers.py", "file_ext": "py", "file_size_in_byte": 1657, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "torch.nn.Module", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.empty", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.empty", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.functional.elu", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 
34, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "93775534", "text": "import requests\nfrom argparse import ArgumentParser\nimport os\nfrom shutil import rmtree\nimport sys\nfrom urlparse import urljoin\nimport logging\n\n\nclass SlidesDeleter(object):\n\n def __init__(self, ome_base_url, slides_file_list, log_level='INFO', log_file=None):\n self.ome_delete_url = urljoin(ome_base_url, 'mirax/delete_files/')\n self.ome_get_file_info_url = urljoin(ome_base_url, 'mirax/file_info/')\n self.slides_list = self.get_slides_list(slides_file_list)\n self.logger = self.get_logger(log_level, log_file)\n self.INDEX_FILE_MT = 'mirax/index'\n self.DATA_FOLDER_MT = 'mirax/datafolder'\n\n def get_logger(self, log_level='INFO', log_file=None, mode='a'):\n LOG_FORMAT = '%(asctime)s|%(levelname)-8s|%(message)s'\n LOG_DATEFMT = '%Y-%m-%d %H:%M:%S'\n\n logger = logging.getLogger('slides_deleter')\n if not isinstance(log_level, int):\n try:\n log_level = getattr(logging, log_level)\n except AttributeError:\n raise ValueError('Unsupported literal log level: %s' % log_level)\n logger.setLevel(log_level)\n logger.handlers = []\n if log_file:\n handler = logging.FileHandler(log_file, mode=mode)\n else:\n handler = logging.StreamHandler()\n formatter = logging.Formatter(LOG_FORMAT, datefmt=LOG_DATEFMT)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger\n\n def get_slides_list(self, slides_list_file):\n with open(slides_list_file) as f:\n return [row.replace('\\n', '') for row in f]\n\n def _get_file_path(self, file_name, mimetype):\n response = requests.get(urljoin(self.ome_get_file_info_url, '%s/' % file_name),\n {'mimetype': mimetype})\n if response.status_code == requests.codes.OK:\n return response.json()['file_path']\n else:\n return None\n\n def _delete_file(self, file_path, is_folder=False):\n self.logger.info('## DELETING FROM DISK file %s' % file_path)\n try:\n if not is_folder:\n os.remove(file_path)\n else:\n rmtree(file_path)\n except OSError:\n self.logger.warn('File does not exist')\n\n def _delete_original_file(self, file_name):\n self.logger.info('## DELETING ORIGINAL FILE from OMERO %s' % file_name)\n response = requests.get(urljoin(self.ome_delete_url, '%s/' % file_name))\n if response.status_code != requests.codes.OK:\n self.logger.warn('RESPONSE CODE %s', response.status_code)\n self.logger.warn('%s', response.text)\n return False\n else:\n return True\n\n def run(self, delete_files=False):\n self.logger.info('STARTING DELETION JOB')\n for slide in self.slides_list:\n if delete_files:\n try:\n file_path = self._get_file_path(slide, self.INDEX_FILE_MT)\n folder_path = self._get_file_path(slide, self.DATA_FOLDER_MT)\n except TypeError:\n # if TypeError -> there is no file with that name on the server\n self.logger.warn('There is no file with name %s on the server', slide)\n continue\n deleted = self._delete_original_file(slide)\n if delete_files and deleted:\n self._delete_file(file_path)\n self._delete_file(folder_path, is_folder=True)\n self.logger.info('DELETION JOB COMPLETED')\n\n\ndef get_parser():\n parser = ArgumentParser('Delete Original File objects from OMERO and, optionally, from disk')\n parser.add_argument('--files-list', type=str, required=True,\n help='the list containing the names of the files that will be deleted')\n parser.add_argument('--ome-base-url', type=str, required=True,\n help='the base URL of the OMERO.web server')\n parser.add_argument('--delete', action='store_true',\n help='also 
delete files from disk')\n parser.add_argument('--log-level', type=str, default='INFO',\n help='log level (default=INFO)')\n parser.add_argument('--log-file', type=str, default=None,\n help='log file (default=stderr)')\n return parser\n\n\ndef main(argv):\n parser = get_parser()\n args = parser.parse_args(argv)\n deleter = SlidesDeleter(args.ome_base_url, args.files_list, args.log_level,\n args.log_file)\n deleter.run(args.delete)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n", "sub_path": "tools/delete_slides.py", "file_name": "delete_slides.py", "file_ext": "py", "file_size_in_byte": 4650, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "urlparse.urljoin", "line_number": 13, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 46, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 46, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 57, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 59, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 65, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 65, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 66, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 115, "usage_type": "attribute"}]} +{"seq_id": "275513484", "text": "#!/usr/bin/python\n\n\nimport serial\n\n\ndef get_baudrate(default_baudrate=115200):\n\n cmd = \"AT+IPR?\\r\"\n\n ser = serial.Serial('/dev/ttyS3',default_baudrate, timeout=2)\n ser.write(cmd.encode())\n\n while True :\n\n byteCount = ser.inWaiting()\n\n if( byteCount >= 0):\n\n response = ser.readline()\n response = ser.readline().decode('utf-8')\n print (\"get baudrate response {}\".format(response),end='')\n break\n ser.close()\n return \n\ndef set_baudrate():\n\n cmd=\"AT+IPR=460800\\r\"\n\n #cmd = \"AT+IPR?\\r\"\n ser = serial.Serial('/dev/ttyS3', 115200, timeout=2)\n ser.write(cmd.encode())\n\n while True :\n\n byteCount = ser.inWaiting()\n\n if( byteCount >= 0):\n\n response = ser.readline()\n response = ser.readline().decode('utf-8')\n #print (\"initial baudrate is -> {}\".format(response.decode(\"utf-8\")),end='')\n \n print(\"set baudrate response {}\".format(response),end='')\n break\n ser.close()\n return\n\n\nprint(\"initial baudrate:\")\nget_baudrate()\nprint(\"setting baudrate:\")\nset_baudrate()\nprint(\"Checking new baudrate:\")\nget_baudrate()\n", "sub_path": "board/overlay/root/pyScan.py", "file_name": "pyScan.py", "file_ext": "py", "file_size_in_byte": 1179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "serial.Serial", "line_number": 11, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "342597875", "text": "from src import SpellingWithChemistryElement\nfrom collections import defaultdict\n\n\n### Try chopping off the 
first letter/first two letters and searching in the dictionary\n###\n\n\ndef main() -> None:\n element_file_name = '../data/elements.csv'\n elements = fill_element_dict(element_file_name)\n\n input_file_name = '../data/SpellingWithChemistry02.06.2017Input'\n input_file = open(input_file_name)\n\n for line in input_file:\n line = line.strip()\n elements_used = []\n if can_make_2(line, elements, elements_used):\n print(make_word(elements_used))\n input_file.close()\n\n\ndef make_word(elements: []) -> str:\n string = ''\n for e in elements:\n string += e.symbol\n string += ' ( '\n for e in elements:\n string += e.name + ' '\n string += ')'\n return string\n\n\ndef can_make_2(cur_word: str, e_dict: defaultdict, elements_used: []) -> bool:\n if len(cur_word) == 0:\n return True\n\n if cur_word[0:1].upper() in e_dict:\n elements_used.append(e_dict[cur_word[0:1].upper()])\n return can_make_2(cur_word[1:], e_dict, elements_used)\n if cur_word[0:1].upper() + cur_word[1:2] in e_dict:\n elements_used.append(e_dict[cur_word[0:1].upper() + cur_word[1:2]])\n return can_make_2(cur_word[2:], e_dict, elements_used)\n if cur_word[0:1].upper() + cur_word[1:3] in e_dict:\n elements_used.append(e_dict[cur_word[0:1].upper() + cur_word[1:3]])\n return can_make_2(cur_word[3:], e_dict, elements_used)\n return False\n\n\ndef can_make(cur_word: str, e_dict: defaultdict, elements_used: []) -> bool:\n if len(cur_word) == 0:\n return True\n for symbol in e_dict:\n if not cur_word.lower().startswith(str(symbol).lower()):\n continue\n if len(symbol) == 3 and len(cur_word) >= 3:\n part = cur_word[0:3].lower()\n if part == str(symbol).lower():\n elements_used.append(e_dict[symbol])\n return can_make(cur_word[3:], e_dict, elements_used)\n if len(symbol) == 1 and len(cur_word) >= 1:\n part = cur_word[0:1].lower()\n if part == str(symbol).lower():\n elements_used.append(e_dict[symbol])\n return can_make(cur_word[1:], e_dict, elements_used)\n if len(symbol) == 2 and len(cur_word) >= 2:\n part = cur_word[0:2].lower()\n if part == str(symbol).lower():\n elements_used.append(e_dict[symbol])\n return can_make(cur_word[2:], e_dict, elements_used)\n return False\n\n\ndef fill_element_dict(file_name: str) -> defaultdict:\n element_file = open(file_name)\n\n headers = True\n header_names = []\n element_dict = defaultdict()\n max_element_size = 2\n\n for line in element_file:\n line = line.strip()\n parts = line.split(',')\n if headers:\n for i in range(0, len(parts)):\n p = str(parts[i])\n header_names.append(p.strip())\n headers = False\n else:\n e = SpellingWithChemistryElement.Element(parts[0].strip(),\n parts[1].strip(),\n parts[2].strip(),\n parts[3].strip())\n if len(e.symbol) <= max_element_size:\n element_dict[e.symbol] = e\n element_file.close()\n\n print('Filled dictionary with ' + str(len(element_dict)) + ' elements.')\n return element_dict\n\nif __name__ == '__main__':\n main()", "sub_path": "src/SpellingWithChemistry02.06.2017.py", "file_name": "SpellingWithChemistry02.06.2017.py", "file_ext": "py", "file_size_in_byte": 3569, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "collections.defaultdict", "line_number": 35, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 51, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 80, "usage_type": "call"}, {"api_name": "src.SpellingWithChemistryElement.Element", "line_number": 92, "usage_type": "call"}, {"api_name": "src.SpellingWithChemistryElement", 
"line_number": 92, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "528505982", "text": "import cv2\nimport time\n\ndef takeImage():\n cap = cv2.VideoCapture(0)\n time.sleep(0.5)\n\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # do what you want with frame\n # and then save to file\n time.sleep(0.5)\n cv2.imwrite('static/image.png', frame)\n\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n return (True)\n", "sub_path": "app/takeImage.py", "file_name": "takeImage.py", "file_ext": "py", "file_size_in_byte": 389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "cv2.VideoCapture", "line_number": 5, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 6, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "389220116", "text": "# -*- coding: utf-8 -*-\nimport datetime\nimport xml.etree.ElementTree as ET\nimport gzip\nimport zipfile as zf\nimport os\nimport itertools as it\n\nimport pandas as pd\nfrom operator import itemgetter\n\nfrom support_modules import support as sup\n\n\nclass LogReader(object):\n \"\"\"\n This class reads and parse the elements of a given event-log\n expected format .xes or .csv\n \"\"\"\n\n def __init__(self, input, settings):\n \"\"\"constructor\"\"\"\n self.input = input\n self.file_name, self.file_extension = self.define_ftype()\n\n self.timeformat = settings['timeformat']\n self.column_names = settings['column_names']\n self.one_timestamp = settings['one_timestamp']\n self.filter_d_attrib = settings['filter_d_attrib']\n self.ns_include = settings['ns_include']\n\n self.data = list()\n self.raw_data = list()\n self.load_data_from_file()\n\n def load_data_from_file(self):\n \"\"\"\n reads all the data from the log depending\n the extension of the file\n \"\"\"\n # TODO: esto se puede manejar mejor con un patron de diseno\n if self.file_extension == '.xes':\n self.get_xes_events_data()\n elif self.file_extension == '.csv':\n self.get_csv_events_data()\n # elif self.file_extension == '.mxml':\n # self.data, self.raw_data = self.get_mxml_events_data()\n\n# =============================================================================\n# xes methods\n# =============================================================================\n def get_xes_events_data(self):\n \"\"\"\n reads and parse all the events information from a xes file\n \"\"\"\n temp_data = list()\n tree = ET.parse(self.input)\n root = tree.getroot()\n if self.ns_include:\n ns = {'xes': root.tag.split('}')[0].strip('{')}\n tags = dict(trace='xes:trace',\n string='xes:string',\n event='xes:event',\n date='xes:date')\n else:\n ns = {'xes': ''}\n tags = dict(trace='trace',\n string='string',\n event='event',\n date='date')\n traces = root.findall(tags['trace'], ns)\n i = 0\n sup.print_performed_task('Reading log traces ')\n for trace in traces:\n temp_trace = list()\n caseid = ''\n for string in trace.findall(tags['string'], ns):\n if string.attrib['key'] == 'concept:name':\n caseid = string.attrib['value']\n for event in trace.findall(tags['event'], ns):\n task = ''\n user = ''\n event_type = ''\n for string in event.findall(tags['string'], ns):\n if string.attrib['key'] == 'concept:name':\n task = 
string.attrib['value']\n if string.attrib['key'] == 'org:resource':\n user = string.attrib['value']\n if string.attrib['key'] == 'lifecycle:transition':\n event_type = string.attrib['value'].lower()\n timestamp = ''\n for date in event.findall(tags['date'], ns):\n if date.attrib['key'] == 'time:timestamp':\n timestamp = date.attrib['value']\n try:\n timestamp = datetime.datetime.strptime(\n timestamp[:-6], self.timeformat)\n except ValueError:\n timestamp = datetime.datetime.strptime(\n timestamp, self.timeformat)\n # By default remove Start and End events\n # but they will be added back to standardize\n if task not in ['0', '-1', 'Start', 'End', 'start', 'end']:\n if ((not self.one_timestamp) or\n (self.one_timestamp and event_type == 'complete')):\n temp_trace.append(dict(caseid=caseid,\n task=task,\n event_type=event_type,\n user=user,\n timestamp=timestamp))\n if temp_trace:\n temp_trace = self.append_xes_start_end(temp_trace)\n temp_data.extend(temp_trace)\n i += 1\n self.raw_data = temp_data\n self.data = self.reorder_xes(temp_data)\n sup.print_done_task()\n\n def reorder_xes(self, temp_data):\n \"\"\"\n this method matches the duplicated events in the .xes log\n \"\"\"\n temp_data = pd.DataFrame(temp_data)\n ordered_event_log = list()\n if self.one_timestamp:\n self.column_names['Complete Timestamp'] = 'end_timestamp'\n temp_data = temp_data[temp_data.event_type == 'complete']\n ordered_event_log = temp_data.rename(\n columns={'timestamp': 'end_timestamp'})\n ordered_event_log = ordered_event_log.drop(columns='event_type')\n ordered_event_log = ordered_event_log.to_dict('records')\n else:\n self.column_names['Start Timestamp'] = 'start_timestamp'\n self.column_names['Complete Timestamp'] = 'end_timestamp'\n cases = temp_data.caseid.unique()\n for case in cases:\n start_ev = (temp_data[(temp_data.event_type == 'start') &\n (temp_data.caseid == case)]\n .sort_values(by='timestamp', ascending=True)\n .to_dict('records'))\n complete_ev = (temp_data[(temp_data.event_type == 'complete') &\n (temp_data.caseid == case)]\n .sort_values(by='timestamp', ascending=True)\n .to_dict('records'))\n if len(start_ev) == len(complete_ev):\n temp_trace = list()\n for i, _ in enumerate(start_ev):\n match = False\n for j, _ in enumerate(complete_ev):\n if start_ev[i]['task'] == complete_ev[j]['task']:\n temp_trace.append(\n {'caseid': case,\n 'task': start_ev[i]['task'],\n 'user': start_ev[i]['user'],\n 'start_timestamp': start_ev[i]['timestamp'],\n 'end_timestamp': complete_ev[j]['timestamp']})\n match = True\n break\n if match:\n del complete_ev[j]\n if match:\n ordered_event_log.extend(temp_trace)\n return ordered_event_log\n\n def append_xes_start_end(self, trace):\n for event in ['Start', 'End']:\n idx = 0 if event == 'Start' else -1\n complete_ev = dict()\n complete_ev['caseid'] = trace[idx]['caseid']\n complete_ev['task'] = event\n complete_ev['event_type'] = 'complete'\n complete_ev['user'] = event\n complete_ev['timestamp'] = trace[idx]['timestamp']\n if event == 'Start':\n trace.insert(0, complete_ev)\n if not self.one_timestamp:\n start_ev = complete_ev.copy()\n start_ev['event_type'] = 'start'\n trace.insert(0, start_ev)\n else:\n trace.append(complete_ev)\n if not self.one_timestamp:\n start_ev = complete_ev.copy()\n start_ev['event_type'] = 'start'\n trace.insert(-1, start_ev)\n return trace\n\n# =============================================================================\n# csv methods\n# =============================================================================\n def get_csv_events_data(self):\n 
\"\"\"\n reads and parse all the events information from a csv file\n \"\"\"\n sup.print_performed_task('Reading log traces ')\n log = pd.read_csv(self.input, dtype={'user': str})\n if self.one_timestamp:\n self.column_names['Complete Timestamp'] = 'end_timestamp'\n log = log.rename(columns=self.column_names)\n log = log.astype({'caseid': object})\n log = (log[(log.task != 'Start') & (log.task != 'End')]\n .reset_index(drop=True))\n if self.filter_d_attrib:\n log = log[['caseid', 'task', 'user', 'end_timestamp']]\n log['end_timestamp'] = pd.to_datetime(log['end_timestamp'],\n format=self.timeformat)\n else:\n self.column_names['Start Timestamp'] = 'start_timestamp'\n self.column_names['Complete Timestamp'] = 'end_timestamp'\n log = log.rename(columns=self.column_names)\n log = log.astype({'caseid': object})\n log = (log[(log.task != 'Start') & (log.task != 'End')]\n .reset_index(drop=True))\n if self.filter_d_attrib:\n log = log[['caseid', 'task', 'user',\n 'start_timestamp', 'end_timestamp']]\n log['start_timestamp'] = pd.to_datetime(log['start_timestamp'],\n format=self.timeformat)\n log['end_timestamp'] = pd.to_datetime(log['end_timestamp'],\n format=self.timeformat)\n log['user'].fillna('SYS', inplace=True)\n self.data = log.to_dict('records')\n self.append_csv_start_end()\n self.split_event_transitions()\n sup.print_done_task()\n\n def split_event_transitions(self):\n temp_raw = list()\n if self.one_timestamp:\n for event in self.data:\n temp_event = event.copy()\n temp_event['timestamp'] = temp_event.pop('end_timestamp')\n temp_event['event_type'] = 'complete'\n temp_raw.append(temp_event)\n else:\n for event in self.data:\n start_event = event.copy()\n complete_event = event.copy()\n start_event.pop('end_timestamp')\n complete_event.pop('start_timestamp')\n start_event['timestamp'] = start_event.pop('start_timestamp')\n complete_event['timestamp'] = complete_event.pop('end_timestamp')\n start_event['event_type'] = 'start'\n complete_event['event_type'] = 'complete'\n temp_raw.append(start_event)\n temp_raw.append(complete_event)\n self.raw_data = temp_raw\n\n def append_csv_start_end(self):\n new_data = list()\n data = sorted(self.data, key=lambda x: x['caseid'])\n for key, group in it.groupby(data, key=lambda x: x['caseid']):\n trace = list(group)\n for new_event in ['Start', 'End']:\n idx = 0 if new_event == 'Start' else -1\n t_key = 'end_timestamp'\n if not self.one_timestamp and new_event == 'Start':\n t_key = 'start_timestamp'\n temp_event = dict()\n temp_event['caseid'] = trace[idx]['caseid']\n temp_event['task'] = new_event\n temp_event['user'] = new_event\n temp_event['end_timestamp'] = trace[idx][t_key]\n if not self.one_timestamp:\n temp_event['start_timestamp'] = trace[idx][t_key]\n if new_event == 'Start':\n trace.insert(0, temp_event)\n else:\n trace.append(temp_event)\n new_data.extend(trace)\n self.data = new_data\n\n# =============================================================================\n# Accesssor methods\n# =============================================================================\n def get_traces(self):\n \"\"\"\n returns the data splitted by caseid and ordered by start_timestamp\n \"\"\"\n cases = list(set([x['caseid'] for x in self.data]))\n traces = list()\n for case in cases:\n order_key = 'end_timestamp' if self.one_timestamp else 'start_timestamp'\n trace = sorted(\n list(filter(lambda x: (x['caseid'] == case), self.data)),\n key=itemgetter(order_key))\n traces.append(trace)\n return traces\n\n def get_raw_traces(self):\n \"\"\"\n returns the raw 
data split by caseid and ordered by timestamp\n """\n cases = list(set([c['caseid'] for c in self.raw_data]))\n traces = list()\n for case in cases:\n trace = sorted(\n list(filter(lambda x: (x['caseid'] == case), self.raw_data)),\n key=itemgetter('timestamp'))\n traces.append(trace)\n return traces\n\n def set_data(self, data):\n """\n setter method for the data attribute\n """\n self.data = data\n\n# =============================================================================\n# Support Method\n# =============================================================================\n def define_ftype(self):\n filename, file_extension = os.path.splitext(self.input)\n if file_extension in ['.xes', '.csv', '.mxml']:\n filename = filename + file_extension\n file_extension = file_extension\n elif file_extension == '.gz':\n outFileName = filename\n filename, file_extension = self.decompress_file_gzip(outFileName)\n elif file_extension == '.zip':\n filename, file_extension = self.decompress_file_zip(filename)\n else:\n raise IOError('file type not supported')\n return filename, file_extension\n\n # Decompress .gz files\n def decompress_file_gzip(self, outFileName):\n inFile = gzip.open(self.input, 'rb')\n outFile = open(outFileName, 'wb')\n outFile.write(inFile.read())\n inFile.close()\n outFile.close()\n _, fileExtension = os.path.splitext(outFileName)\n return outFileName, fileExtension\n\n # Decompress .zip files\n def decompress_file_zip(self, outfilename):\n with zf.ZipFile(self.input, \"r\") as zip_ref:\n zip_ref.extractall(\"../inputs/\")\n _, fileExtension = os.path.splitext(outfilename)\n return outfilename, fileExtension\n\n# def get_mxml_events_data(self, filename, parameters):\n# \"\"\"read and parse all the events information from a MXML file\"\"\"\n# temp_data = list()\n# tree = ET.parse(filename)\n# root = tree.getroot()\n# process = root.find('Process')\n# procInstas = process.findall('ProcessInstance')\n# i = 0\n# for procIns in procInstas:\n# sup.print_progress(((i / (len(procInstas) - 1)) * 100), 'Reading log traces ')\n# caseid = procIns.get('id')\n# auditTrail = procIns.findall('AuditTrailEntry')\n# for trail in auditTrail:\n# task = ''\n# user = ''\n# event_type = ''\n# timestamp = ''\n# attributes = trail.find('Data').findall('Attribute')\n# for attr in attributes:\n# if (attr.get('name') == 'concept:name'):\n# task = attr.text\n# if (attr.get('name') == 'lifecycle:transition'):\n# event_type = attr.text\n# if (attr.get('name') == 'org:resource'):\n# user = attr.text\n# event_type = trail.find('EventType').text\n# timestamp = trail.find('Timestamp').text\n# timestamp = datetime.datetime.strptime(trail.find('Timestamp').text[:-6], parameters['timeformat'])\n# temp_data.append(\n# dict(caseid=caseid, task=task, event_type=event_type, user=user, start_timestamp=timestamp,\n# end_timestamp=timestamp))\n# i += 1\n# raw_data = temp_data\n# temp_data = self.reorder_mxml(temp_data)\n# sup.print_done_task()\n# return temp_data, raw_data\n# def reorder_mxml(self, temp_data):\n# \"\"\"this method joins the duplicated events on the .mxml log\"\"\"\n# data = list()\n# start_events = list(filter(lambda x: x['event_type'] == 'start', temp_data))\n# finish_events = list(filter(lambda x: x['event_type'] == 'complete', temp_data))\n# for x, y in zip(start_events, finish_events):\n# data.append(dict(caseid=x['caseid'], task=x['task'], event_type=x['event_type'],\n# user=x['user'], start_timestamp=x['start_timestamp'], end_timestamp=y['start_timestamp']))\n# return data\n
# # TODO exception handling\n# def find_first_task(self):\n# \"\"\"finds the first task\"\"\"\n# cases = list()\n# [cases.append(c['caseid']) for c in self.data]\n# cases = sorted(list(set(cases)))\n# first_task_names = list()\n# for case in cases:\n# trace = sorted(list(filter(lambda x: (x['caseid'] == case), self.data)), key=itemgetter('start_timestamp'))\n# first_task_names.append(trace[0]['task'])\n# first_task_names = list(set(first_task_names))\n# return first_task_names\n# def read_resource_task(self,task,roles):\n# \"\"\"returns the resource that performs a task\"\"\"\n# filtered_list = list(filter(lambda x: x['task']==task, self.data))\n# role_assignment = list()\n# for task in filtered_list:\n# for role in roles:\n# for member in role['members']:\n# if task['user']==member:\n# role_assignment.append(role['role'])\n# return max(role_assignment)\n", "sub_path": "support_modules/readers/log_reader.py", "file_name": "log_reader.py", "file_ext": "py", "file_size_in_byte": 18220, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 57, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 57, "usage_type": "name"}, {"api_name": "support_modules.support.print_performed_task", "line_number": 73, "usage_type": "call"}, {"api_name": "support_modules.support", "line_number": 73, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 96, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 99, "usage_type": "attribute"}, {"api_name": "support_modules.support.print_done_task", "line_number": 117, "usage_type": "call"}, {"api_name": "support_modules.support", "line_number": 117, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 123, "usage_type": "call"}, {"api_name": "support_modules.support.print_performed_task", "line_number": 195, "usage_type": "call"}, {"api_name": "support_modules.support", "line_number": 195, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 196, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 205, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 217, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 219, "usage_type": "call"}, {"api_name": "support_modules.support.print_done_task", "line_number": 225, "usage_type": "call"}, {"api_name": "support_modules.support", "line_number": 225, "usage_type": "name"}, {"api_name": "itertools.groupby", "line_number": 252, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 286, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 299, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 313, "usage_type": "call"}, {"api_name": "os.path", "line_number": 313, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 328, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 333, "usage_type": "call"}, {"api_name": "os.path", "line_number": 333, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path", "line_number": 340, "usage_type": "attribute"}]} +{"seq_id": 
"272926869", "text": "import pymysql\n\n\ndef execute_sql(sql):\n try:\n print(\"开始连接..\")\n db = pymysql.connect(\"127.0.0.1\", \"test\", \"123456\", \"test\")\n cursor = db.cursor()\n print(\"开始执行...\")\n cursor.execute(sql)\n return cursor.fetchall()\n except Exception as e:\n print(e)\n finally:\n try:\n db.close()\n except Exception as e1:\n print(e1)\n\n\nusers = execute_sql(\"select * from user\")\nprint(type(users))\nprint(users)\n", "sub_path": "test/pymysql_test.py", "file_name": "pymysql_test.py", "file_ext": "py", "file_size_in_byte": 494, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pymysql.connect", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "356062495", "text": "from nltk.corpus import stopwords\nimport gensim\nimport re\nimport numpy as np\nfrom collections import namedtuple\nimport csv\nimport timeit\nimport random\nfrom bs4 import BeautifulSoup\nimport pickle\nfrom pprint import pprint\n\ndef opencsv(file_name):\n rows = []\n with open(file_name) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n rows.append(row)\n csvfile.close()\n return rows\n\ndef onet_read(file_name):\n rows = opencsv(file_name)\n onet_list = {}\n for row in rows:\n onet_list[row['id']] = row['name']\n return onet_list\n\ndef add_title(row_dict, onet_list):\n ## Note: *3 or *2 or 1 \n if row_dict['theonet'] in onet_list:\n onet_string = onet_list[row_dict['theonet']]\n else:\n onet_string = 'unclassified job'\n \n if len(row_dict['carotene_title']) == 0:\n \tcaro_string = ' '\n else:\n \tcaro_string = row_dict['carotene_title'] \n \n if len(row_dict['job_list_title']) == 0:\n \ttitle_string = ' '\n else:\n \ttitle_string = row_dict['job_list_title'] \n \n if len(row_dict['jobreq']) == 0:\n \trec_string = ' '\n else:\n \trec_string = row_dict['jobreq'] \n\n row_old = (onet_string+' ')*3 + (caro_string+' ')*3 + (title_string+' ')*3 + row_dict['jobdesc'] + (rec_string + ' ')*3\n return row_old\n\ndef clean_row(row_old, prefilter, mat):\n clean_row_tmp = [] \n soup = BeautifulSoup(row_old, 'html.parser')\n row_parts = soup.findAll(text=True)\n row = ' '.join(row_parts) \n word_list = list(filter(None, mat.sub(' ', row).split(' ')))\n \n for word in word_list:\n if word.lower() not in prefilter and len(word) >= 2:\n clean_row_tmp.append(word.lower())\n return clean_row_tmp\n\ndef clean(data_name, add):\n print ('start cleaning')\n start = timeit.default_timer()\n\n sen_indx = 0\n doc = namedtuple('doc', 'words tags')\n prefilter = set(stopwords.words('english'))\n prefilter.add('nbsp')\n mat=re.compile('[^A-Za-z0-9]')\n onet_list = onet_read('/home/search/GBR_DL/DLWorkflows/DLMatcherWorkflow/onet_id_name.csv')\n\n job_key = 'jobdesc'\n job_rec = 'jobreq'\n job_id_key = 'did'\n job_onet = 'theonet'\n job_app = 'appnumber'\n job_title = 'job_list_title'\n job_carotene = 'carotene_title'\n\n job_city = 'cityname'\n job_loc = 'statename'\n\n vecs = []\n job_dict = {}\n cnt = 0\n with open(data_name) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n cnt=cnt+1\n ## Add Title or Not\n if row[job_id_key] is None:\n \tcontinue\n if add is False:\n row_tmp = row[job_key]\n else:\n row_tmp = add_title(row, onet_list)\n tmp = clean_row(row_tmp, prefilter, mat) \n if len(tmp) > 10: \n l = row[job_id_key]\n vecs.append(doc(tmp, ['DID_%s' % l]))\n\n job_dict[l] = {}\n job_dict[l][job_onet] = row[job_onet]\n job_dict[l]['indx'] = sen_indx\n sen_indx = sen_indx + 1\n job_dict[l][job_loc] = 
row[job_loc]\n job_dict[l][job_city] = row[job_city]\n if row[job_app] == 'NULL':\n job_dict[l][job_app] = '0'\n else:\n job_dict[l][job_app] = row[job_app] \n # job_dict[l][job_title] = row[job_title]\n # job_dict[l][job_carotene] = row[job_carotene]\n if cnt == 50000:\n break\n csvfile.close()\n\n with open('/home/search/GBR_DL/job_dict', 'wb') as handle:\n pickle.dump(job_dict, handle)\n handle.close()\n #pprint(job_dict)\n job_dict = {}\n\n stop = timeit.default_timer()\n print ('cleaned file # = ', len(vecs))\n print ('time clearning: ', stop - start)\n return vecs\n\ndef doc2vec_train(model_name, it, vecs):\n print (\"start training\")\n model = gensim.models.Doc2Vec(min_count=5, window=10, size=100, workers=16)\n model.build_vocab(vecs)\n\n start = timeit.default_timer()\n Idx = list(range(len(vecs)))\n for epoch in range(it):\n random.shuffle(Idx)\n perm_sentences = [vecs[i] for i in Idx]\n model.train(perm_sentences)\n stop = timeit.default_timer()\n print (\"time training:\", stop - start)\n model.save(model_name)\n return\n\nif __name__ == '__main__':\n data_name = '/home/search/GBR_DL/GBR_DL_training.csv'\n add = True\n \n vecs = clean(data_name, add)\n model_name = '/home/search/GBR_DL/model_test'\n it = 1\n doc2vec_train(model_name, it, vecs)\n\n\n\n\n\n\n\n\n", "sub_path": "YieldModelDLMWorkflow/DLMatcherWorkflowYieldModel/DL_train.py", "file_name": "DL_train.py", "file_ext": "py", "file_size_in_byte": 4626, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "csv.DictReader", "line_number": 16, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 56, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 68, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 71, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 72, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 72, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 74, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 92, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 124, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 129, "usage_type": "call"}, {"api_name": "gensim.models.Doc2Vec", "line_number": 136, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 136, "usage_type": "attribute"}, {"api_name": "timeit.default_timer", "line_number": 139, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 142, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 145, "usage_type": "call"}]} +{"seq_id": "357036930", "text": "from graphviz import Digraph,Graph\nimport numpy.random as npr\n\n# formatはpngを指定(他にはPDF, PNG, SVGなどが指定可)\n# G = Digraph(format='png',engine=\"fdp\")\nG = Digraph(format='png',engine=\"circo\")\nG.attr('node', shape='circle')\nG.attr(overlap='false')\n\n# ノードの追加\nname = \"arayama\"\nfor i in name:\n G.node(i, i)\n\n# 辺の追加\nfor i in range(1,len(name)):\n G.edge(name[i-1],name[i])\n\n# print()するとdot形式で出力される\nprint(G)\n\n# binary_tree.pngで保存\nG.render('graph1')\n\n\n", "sub_path": "source/infomation_math/graph1.py", "file_name": "graph1.py", "file_ext": "py", "file_size_in_byte": 517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "graphviz.Digraph", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "631666479", 
"text": "\"\"\"\nTODO:\n\n- Use pygame to show sudoku\n- Implement different techniques to solve (pointing pairs, hidden pairs, etc.)\n\"\"\"\n\nimport numpy as np\nimport pygame as pyg\nfrom write import write\n\npyg.init()\ndisp = pyg.display\ndisp.init()\n\nscreen = disp.set_mode((960, 720))\nscreen_w, screen_h = screen.get_size()\n\ndisp.set_caption('Sudoku Solver')\n\nBLACK = pyg.Color('black')\nWHITE = pyg.Color('white')\nBLUE = pyg.Color('blue')\n\nbg_col = WHITE\n\nscreen.fill(bg_col)\ndisp.flip()\n\ndef update():\n disp.flip()\n\ndef clear():\n screen.fill(bg_col)\n\ndef short_to_list(shorthand):\n s = str(shorthand)\n s = [s[i:i+9] for i in range(0, len(s), 9)]\n l = [list(i) for i in s]\n l = [[int(i) if i.isdigit() else 0 for i in l2] for l2 in l]\n return l\n\ndef update_pos_value(a, possible, r, c):\n p = possible.copy()\n cur_val = a[r, c]\n cur_row = p[r].tolist()\n cur_col = p.T[c].tolist()\n cur_squ = p[3*(r//3):3*(r//3)+3,\n 3*(c//3):3*(c//3)+3]\n\n # Update row & col\n cur_row = [[i for i in l if i != cur_val] for l in cur_row]\n cur_col = [[i for i in l if i != cur_val] for l in cur_col]\n\n # Update square\n cur_squ = [[[i for i in l if i != cur_val]\n for l in row]\n for row in cur_squ]\n\n p[r] = cur_row\n p.T[c] = cur_col\n p[3*(r//3):3*(r//3)+3,\n 3*(c//3):3*(c//3)+3] = cur_squ\n\n return p\n\ndef solvesudoku(sudoku, draw=False):\n global possible, a\n if type(sudoku) == str:\n a = np.array(short_to_list(sudoku))\n elif type(sudoku) in (list, np.array):\n a = np.array(sudoku)\n tot_rows, tot_cols = a.shape\n\n # Possible values array with appendable lists in axis 2\n possible = [[[] for i in range(tot_cols)] for i in range(tot_rows)]\n possible[0][0] = [0]\n possible = np.array(possible)\n possible[0][0] = []\n\n # Update possible values array\n for r in range(tot_rows):\n for c in range(tot_cols):\n if a[r, c] != 0:\n continue\n \n cur_row = a[r].tolist()\n cur_col = a.T[c].tolist()\n cur_squ = a[3*(r//3):3*(r//3)+3,\n 3*(c//3):3*(c//3)+3].flatten().tolist()\n all_check = cur_row + cur_col + cur_squ\n \n possible[r, c] = [i for i in range(1, 10) if i not in all_check]\n\n # Solve sudoku\n changed = True\n last_text = None\n b = False\n while changed:\n changed = False\n for r in range(tot_rows):\n for c in range(tot_cols):\n if a[r, c] != 0:\n possible[r, c] = []\n continue\n\n # Sudoku rows, cols, squs\n cur_row = a[r].tolist()\n cur_col = a.T[c].tolist()\n cur_squ = a[3*(r//3):3*(r//3)+3,\n 3*(c//3):3*(c//3)+3].flatten().tolist()\n all_values = cur_row + cur_col + cur_squ\n all_values_set = set(all_values)\n\n # Possible rows, cols, rows\n cur_pos_row = possible[r].tolist()\n cur_pos_col = possible.T[c].tolist()\n cur_pos_squ = possible[3*(r//3):3*(r//3)+3,\n 3*(c//3):3*(c//3)+3].flatten().tolist()\n cur_pos_squ_block = possible[3*(r//3):3*(r//3)+3,\n 3*(c//3):3*(c//3)+3]\n \n cur_pos_row.remove(possible[r, c])\n cur_pos_col.remove(possible[r, c])\n cur_pos_squ.remove(possible[r, c])\n\n cur_pos_row = [x for l in cur_pos_row for x in l]\n cur_pos_col = [x for l in cur_pos_col for x in l]\n cur_pos_squ = [x for l in cur_pos_squ for x in l]\n\n # Naked Singles\n if len(possible[r, c]) == 1:\n a[r, c] = possible[r, c][0]\n possible[r, c] = []\n possible = update_pos_value(a, possible, r, c)\n last_text = 'Naked Single at (%d, %d) changed to %d' %\\\n (r, c, a[r, c])\n print(last_text)\n changed = True\n\n # Hidden Singles\n for check in possible[r, c]:\n if cur_pos_row.count(check) == 0 or\\\n cur_pos_col.count(check) == 0 or\\\n cur_pos_squ.count(check) == 0:\n 
last_text = 'Hidden Single at (%d, %d) changed to %d' %\\\n (r, c, check)\n print(last_text)\n a[r, c] = check\n possible[r, c] = []\n possible = update_pos_value(a, possible, r, c)\n changed = True\n if draw:\n draw_sudoku(a, possible=possible, cur_square=(r, c), text=last_text)\n\n # Locked Candidates (Pointing)\n for r in range(0, tot_rows, 3):\n for c in range(0, tot_cols, 3):\n squ = a[3*(r//3):3*(r//3)+3,\n 3*(c//3):3*(c//3)+3].flatten().tolist()\n squ_block = a[3*(r//3):3*(r//3)+3,\n 3*(c//3):3*(c//3)+3]\n squ_pos = possible[3*(r//3):3*(r//3)+3,\n 3*(c//3):3*(c//3)+3].flatten().tolist()\n squ_pos_block = possible[3*(r//3):3*(r//3)+3,\n 3*(c//3):3*(c//3)+3]\n squ_pos_rows = [sum(l, []) for l in squ_pos_block]\n squ_pos_cols = [sum(l, []) for l in squ_pos_block.T]\n \n for check in set(sum(squ_pos, [])):\n in_row = [list(row).count(check) for row in squ_pos_rows]\n # See if there is only one row w/ value of check\n in_row_b = [i > 0 for i in in_row]\n if in_row_b.count(True) == 1: # If only one row\n index_True = in_row_b.index(True)\n if in_row[index_True] > 1:\n # Get rid of other possibilities\n cur_row = r + index_True\n row = possible[cur_row].tolist()[:c] +\\\n possible[cur_row].tolist()[c+3:]\n if any(check in l for l in row):\n # Update row\n possible[cur_row][:c] = [[i for i in l if i != check]\n for l in possible[cur_row][:c]]\n possible[cur_row][c+3:] = [[i for i in l if i != check]\n for l in possible[cur_row][c+3:]]\n changed = True\n last_text = 'Locked Candidates in row %d; removed %d from row' %\\\n (cur_row, check)\n print(last_text)\n if draw:\n draw_sudoku(a, possible=possible)\n break\n\n in_col = [list(col).count(check) for col in squ_pos_cols]\n # See if there is only one col w/ value of check\n in_col_b = [i > 0 for i in in_col]\n if in_col_b.count(True) == 1: # If only one row\n index_True = in_col_b.index(True)\n if in_col[index_True] > 1:\n # Get rid of other possibilities\n cur_col = c + index_True\n col = possible.T[cur_col].tolist()[:r] +\\\n possible.T[cur_col].tolist()[r+3:]\n if any(check in l for l in col):\n # Update col\n possible.T[cur_col][:r] = [[i for i in l if i != check]\n for l in possible.T[cur_col][:r]]\n possible.T[cur_col][r+3:] = [[i for i in l if i != check]\n for l in possible.T[cur_col][r+3:]]\n changed = True\n last_text = 'Locked Candidates in col %d; removed %d from col' %\\\n (cur_col, check)\n print(last_text)\n if draw:\n draw_sudoku(a, possible=possible)\n break\n \n \n draw_sudoku(a, possible=possible)\n return a\n \ndef solvesudoku_force(sudoku):\n a = np.array(sudoku)\n tot_rows, tot_cols = a.shape\n\n # Fixed values (coords)\n fixed = [(r, c) for r in range(tot_rows)\n for c in range(tot_cols) if a[r, c] > 0]\n\n # Possible values array with appendable lists in axis 2\n possible = [[[] for i in range(tot_cols)] for i in range(tot_rows)]\n possible[0][0] = [0]\n possible = np.array(possible)\n possible[0][0] = []\n\n r = 0\n c = 0\n\n go_back = False\n while not all(a.flatten()):\n if (r, c) in fixed and not go_back:\n c += 1\n if c > 8:\n c = 0\n r += 1\n continue\n elif (r, c) in fixed and go_back:\n c -= 1\n if c < 0:\n c = 8\n r -= 1\n continue\n \n cur_row = a[r].tolist()\n cur_col = a.T[c].tolist()\n cur_squ = a[3*(r//3):3*(r//3)+3,\n 3*(c//3):3*(c//3)+3].flatten().tolist()\n all_values = cur_row + cur_col + cur_squ\n\n if go_back:\n a[r, c] += 1\n while a[r, c] in all_values and a[r, c] <= 9:\n a[r, c] += 1\n\n if a[r, c] > 9: # If finished looping and none are valid\n a[r, c] = 0 # Clear square\n c -= 1\n if c < 0:\n 
c = 8\n r -= 1\n else: # If found a valid number\n go_back = False # Stop going back\n c += 1\n if c > 8:\n c = 0\n r += 1\n elif not go_back:\n a[r, c] += 1\n\n while a[r, c] in all_values and a[r, c] <= 9:\n a[r, c] += 1\n\n if a[r, c] > 9: # If finished looping and none are valid\n a[r, c] = 0 # Clear square\n go_back = True # Start going back\n c -= 1\n if c < 0:\n c = 8\n r -= 1\n else: # If found a valid number\n c += 1 # Advance\n if c > 8: # Go to next row if needed\n c = 0\n r += 1\n \n draw_sudoku(a)\n return a\n\n\ndef draw_sudoku(sudoku, possible=None, cur_square=None, text=None):\n a = sudoku.copy()\n tot_rows, tot_cols = a.shape\n\n clear()\n \n tile_size = 50\n font_size = 36\n \n start_x = dest_x = screen_w//2 - (tot_cols*tile_size)//2 + tile_size//2\n start_y = dest_y = screen_h//2 - (tot_rows*tile_size)//2 + tile_size//2\n\n t = 3\n \n # Draw lines\n for x in range(dest_x-tile_size//2 - 1, dest_x + tile_size*9 - 1, tile_size):\n if t == 3:\n width = 5\n t = 0\n else:\n width = 2\n pyg.draw.line(screen, BLACK, (x, dest_y-tile_size//2),\n (x, dest_y + int(tile_size*8.5)), width)\n t += 1\n\n t = 3\n for y in range(dest_y-tile_size//2 - 1, dest_y + tile_size*9 - 1, tile_size):\n if t == 3:\n width = 5\n t = 0\n else:\n width = 2\n pyg.draw.line(screen, BLACK, (dest_x-tile_size//2, y),\n (dest_x + int(tile_size*8.5), y), width)\n t += 1\n\n # Draw numbers\n for r in range(tot_rows):\n for c in range(tot_cols):\n if cur_square == (r, c):\n pyg.draw.circle(screen, BLUE, (dest_x, dest_y), 3)\n if a[r, c] == 0:\n if possible is not None: # Draw possible values\n tl_x = small_x = dest_x - tile_size//3\n tl_y = small_y = dest_y - tile_size//3\n\n for i in range(1, 10):\n if i in possible[r, c]:\n write(screen, str(i), BLACK, None,\n font_size//3, small_x, small_y)\n small_x += tile_size//3\n if small_x > tl_x + 2*tile_size//3:\n small_y += tile_size//3\n small_x = tl_x\n \n dest_x += tile_size\n continue\n write(screen, str(a[r, c]), BLACK, None, font_size, dest_x, dest_y)\n dest_x += tile_size\n dest_x = start_x\n dest_y += tile_size\n\n if text:\n write(screen, text, BLACK, None, font_size, screen_w//2, screen_h-75)\n update()\n\ntest_sudoku = [[5,3,0,0,7,0,0,0,0],\n [6,0,0,1,9,5,0,0,0],\n [0,9,8,0,0,0,0,6,0],\n [8,0,0,0,6,0,0,0,3],\n [4,0,0,8,0,3,0,0,1],\n [7,0,0,0,2,0,0,0,6],\n [0,6,0,0,0,0,2,8,0],\n [0,0,0,4,1,9,0,0,5],\n [0,0,0,0,8,0,0,7,9]]\n\nhard_sudoku = [[0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,3,0,8,5],\n [0,0,1,0,2,0,0,0,0],\n [0,0,0,5,0,7,0,0,0],\n [0,0,4,0,0,0,1,0,0],\n [0,9,0,0,0,0,0,0,0],\n [5,0,0,0,0,0,0,7,3],\n [0,0,2,0,1,0,0,0,0],\n [0,0,0,0,4,0,0,0,9]]\n\nautomorphic_sudoku = [[0,0,0,2,1,0,0,0,0],\n [0,0,7,3,0,0,0,0,0],\n [0,5,8,0,0,0,0,0,0],\n [4,3,0,0,0,0,0,0,0],\n [2,0,0,0,0,0,0,0,8],\n [0,0,0,0,0,0,0,7,6],\n [0,0,0,0,0,0,2,5,0],\n [0,0,0,0,0,7,3,0,0],\n [0,0,0,0,9,8,0,0,0]]\n\nhsym_sudoku = [[0,0,0,0,0,0,0,0,0],\n [1,2,0,0,0,0,0,8,4],\n [0,3,0,0,0,0,0,7,0],\n [0,0,4,0,0,0,6,0,0],\n [0,0,0,2,0,3,0,0,0],\n [0,0,5,0,0,0,9,0,0],\n [0,0,6,0,9,0,5,0,0],\n [0,7,0,0,0,0,0,2,0],\n [0,0,0,0,5,0,0,0,0]]\n\n\nlocked_sudoku = [[4,1,0,0,0,0,8,5,9],\n [0,0,0,0,0,0,0,0,0],\n [0,7,8,0,1,2,0,0,0],\n [6,0,0,2,5,0,9,0,1],\n [0,0,0,0,0,0,0,0,0],\n [1,0,7,0,8,6,0,0,4],\n [0,0,0,6,4,0,1,3,0],\n [0,0,0,0,0,0,0,0,0],\n [8,3,6,0,0,0,0,4,2]]\n\nlocked2_sudoku = [[0,0,0,6,0,0,1,0,3],\n [0,0,0,0,0,3,0,0,0],\n [3,0,5,0,0,1,0,4,2],\n [0,4,3,0,0,6,0,7,0],\n [2,0,0,5,0,7,0,0,8],\n [0,6,0,1,0,0,4,2,0],\n [1,7,0,3,0,0,2,0,4],\n [0,0,0,2,0,0,0,0,0],\n [8,0,2,0,0,4,0,0,0]]\n\nlocked3_sudoku = 
[[0,2,0,1,0,0,0,9,7],\n [1,0,0,4,0,0,0,0,0],\n [0,0,4,6,7,2,0,0,0],\n [0,0,0,0,0,7,6,5,0],\n [7,0,0,0,0,0,0,0,9],\n [0,4,1,9,0,0,0,0,0],\n [0,0,0,2,9,6,1,0,0],\n [0,0,0,0,0,1,0,0,2],\n [3,1,0,0,0,5,0,4,0]]\n", "sub_path": "Sudoku Solver.py", "file_name": "Sudoku Solver.py", "file_ext": "py", "file_size_in_byte": 15634, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "pygame.init", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 234, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 322, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 322, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 333, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 333, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 341, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 341, "usage_type": "attribute"}, {"api_name": "write.write", "line_number": 349, "usage_type": "call"}, {"api_name": "write.write", "line_number": 358, "usage_type": "call"}, {"api_name": "write.write", "line_number": 364, "usage_type": "call"}]} +{"seq_id": "362159196", "text": "from scipy import misc\nfrom keras.models import load_model\nimport numpy as np\n\n\n# This is just an idea I had about trying to pick a trajectory through the latent space, following it and recording all of the images\n\ngenerator = load_model('models/generator.h5')\n\nntrain = 1\nnoise_gen = np.random.uniform(0,1,size=[100])\n\n\ninputs = np.tile(noise_gen, (1000,1))\ninputs = np.dot(np.diag(np.arange(0, 1, step=0.001)) , inputs)\n\n\n\ngenerated_images = generator.predict(inputs)\n\ngenerated_images += 0.5\ngenerated_images *= 255\n\nprint(\"Saving images\")\n\n#The images repeat themselves... 
I don't know why.\n\nfor i,image in enumerate(generated_images):\n misc.imsave('output/'+str(i)+'.jpg', image)\n\n\n", "sub_path": "src/experiments/interpolate.py", "file_name": "interpolate.py", "file_ext": "py", "file_size_in_byte": 692, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "keras.models.load_model", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 15, "usage_type": "call"}, {"api_name": "scipy.misc.imsave", "line_number": 29, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "246208673", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport logging\nimport json\nimport os\nimport time\nimport sys\nfrom copy import copy\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nfrom telegram import Chat\nfrom datetime import datetime\n\n_port = int(os.environ.get('PORT', '9000'))\n_webhook = \"%s%s\" % (os.environ[\"WEB_HOOK\"], os.environ[\"BOT_TOKEN\"])\n_token = os.environ[\"BOT_TOKEN\"]\n_location = os.environ[\"URL_LOCATION\"]\n_certificate = os.environ[\"CERTIFICATE\"]\n_listen = \"127.0.0.1\"\n\n# Enable logging\nlogging.basicConfig(stream=sys.stderr, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)\n\nlogger = logging.getLogger(__name__)\n\n# Group title : Group id\n# Bot forwards messages from group identified by title to group identified by id\npairing = {\n \"Simulator\": -1001265460962,\n \"NEM::Red\": -1001265460962,\n \"NEM Czech & Slovak Republic\": -1001265460962,\n \"NEMberia 2.0\": -1001265460962\n}\n\ndef admin(bot, update):\n logger.info(\"Title: '%s' ID: %d\" % (update.message.chat.title, update.message.chat.id))\n if (update.message.chat.title not in pairing):\n return\n\n chat_id = pairing[update.message.chat.title]\n\n if (update.message.chat.username):\n bot.send_message(chat_id, \"Alert in https://t.me/%s/%s\" % (update.message.chat.username, update.message.message_id))\n else:\n bot.send_message(chat_id, \"Alert in %s\" % update.message.chat.title)\n\ndef check(bot, update):\n if (update.message.text.strip() in (\"/admin\", \"/ban\", \"/kick\", \"/spam\", \"/scam\")):\n admin(bot, update)\n\ndef error(bot, update, error):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.error('Update \"%s\" caused error \"%s\"', update, error)\n\ndef main():\n \"\"\"Start the bot.\"\"\"\n # Create the EventHandler and pass it your bot's token.\n logger.info(\"Creating updater object with token: '%s'\" % (_token))\n\n updater = Updater(_token)\n\n i = 0\n while i < 2:\n try:\n logger.info(\"Starting webhook '%s' %d '%s'\" % (_listen, _port, _location))\n updater.start_webhook(listen=_listen, port=_port, url_path=_location)\n updater.bot.set_webhook(url=_webhook, certificate=open(_certificate, 'rb'))\n break\n except Exception as e:\n logger.error(\"Exception: %s\" % e)\n updater.stop()\n #endtry\n\n i += 1\n time.sleep(1)\n #endwhile\n \n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # on different commands - answer in Telegram\n 
dp.add_handler(CommandHandler(\"admin\", admin))\n dp.add_handler(CommandHandler(\"ban\", admin))\n dp.add_handler(CommandHandler(\"kick\", admin))\n dp.add_handler(CommandHandler(\"spam\", admin))\n dp.add_handler(CommandHandler(\"scam\", admin))\n dp.add_handler(MessageHandler(Filters.text, check))\n\n # log all errors\n dp.add_error_handler(error)\n\n logger.info(\"Running\")\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 3198, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "telegram.ext.Updater", "line_number": 60, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 75, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 82, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 83, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 84, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 85, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 86, "usage_type": "call"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 87, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.text", "line_number": 87, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "487900742", "text": "\"\"\"\nThis example demonstrates how to use NumPy to do image transition.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport sys\n\ndef image_load(filename):\n return plt.imread(filename)\n\n\ndef image_gen(file1, file2, steps=30):\n \"\"\"Generator for image arrays.\"\"\"\n img1 = image_load(file1) # load the two image files into ndarrays\n img2 = image_load(file2)\n if img1.shape != img2.shape:\n print(\"Error: the two images have different shapes.\", file=sys.stderr)\n exit(2)\n \n # go from img1 to img2 than back to img1. s varies from 0 to 1 and then back to 0:\n svalues = np.hstack([np.linspace(0.0, 1.0, steps), np.linspace(1.0, 0, steps)])\n\n # construct now the list of images, so that we don't have to repeat that later:\n images = [np.uint8(img1 * (1.0 - s) + img2 * s) for s in svalues] \n\n # get a new image as a combination of img1 and img2\n while True: # repeat all images in a loop\n for img in images:\n yield img \n \nfig = plt.figure()\n# create image plot and indicate this is animated. 
Start with an image.\nim = plt.imshow(image_load(\"florida-keys-800-480.jpg\"), interpolation='none', animated=True)\n\n# the two images must have the same shape:\nimggen = image_gen(\"florida-keys-800-480.jpg\", \"Grand_Teton-800-480.jpg\", steps=30)\n\n# updatefig is called for each frame, each update interval:\ndef updatefig(*args):\n global imggen\n img_array = next(imggen) # get next image animation frame\n im.set_array(img_array) # set it. FuncAnimation will display it\n return (im,)\n\n# create animation object that will call function updatefig every 60 ms\nani = animation.FuncAnimation(fig, updatefig, interval=60, blit=False)\nplt.title(\"Image transformation\")\nplt.show()\n", "sub_path": "animation-transition (1).py", "file_name": "animation-transition (1).py", "file_ext": "py", "file_size_in_byte": 1823, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "matplotlib.pyplot.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "588121221", "text": "#!/usr/bin/python3\n\nimport json\nfrom hashlib import sha1\nfrom pathlib import Path\n\nfrom brownie._config import ARGV\nfrom brownie.network.state import _get_current_dependencies\nfrom brownie.project.scripts import _get_ast_hash\nfrom brownie.test import coverage\n\nSTATUS_SYMBOLS = {\"passed\": \".\", \"skipped\": \"s\", \"failed\": \"F\"}\n\nSTATUS_TYPES = {\n \".\": \"passed\",\n \"s\": \"skipped\",\n \"F\": \"failed\",\n \"E\": \"error\",\n \"x\": \"xfailed\",\n \"X\": \"xpassed\",\n}\n\n\nclass TestManager:\n def __init__(self, project):\n self.project = project\n self.project_path = project._project_path\n self.active_path = None\n self.count = 0\n self.results = None\n self.isolated = set()\n glob = self.project_path.glob(\"tests/**/conftest.py\")\n self.conf_hashes = dict((self._path(i.parent), _get_ast_hash(i)) for i in glob)\n try:\n with self.project_path.joinpath(\"build/tests.json\").open() as fp:\n hashes = json.load(fp)\n except (FileNotFoundError, json.decoder.JSONDecodeError):\n hashes = {\"tests\": {}, \"contracts\": {}, \"tx\": {}}\n\n self.tests = dict(\n (k, v)\n for k, v in hashes[\"tests\"].items()\n if Path(k).exists() and self._get_hash(k) == v[\"sha1\"]\n )\n build = self.project._build\n self.contracts = dict((k, v[\"bytecodeSha1\"]) for k, v in build.items() if 
v[\"bytecode\"])\n changed_contracts = set(\n k\n for k, v in hashes[\"contracts\"].items()\n if k not in self.contracts or v != self.contracts[k]\n )\n if changed_contracts:\n for txhash, coverage_eval in hashes[\"tx\"].items():\n if not changed_contracts.intersection(coverage_eval.keys()):\n coverage._add_cached_transaction(txhash, coverage_eval)\n self.tests = dict(\n (k, v)\n for k, v in self.tests.items()\n if v[\"isolated\"] is not False and not changed_contracts.intersection(v[\"isolated\"])\n )\n else:\n for txhash, coverage_eval in hashes[\"tx\"].items():\n coverage._add_cached_transaction(txhash, coverage_eval)\n\n def _path(self, path):\n return str(Path(path).absolute().relative_to(self.project_path))\n\n def set_isolated_modules(self, paths):\n self.isolated = set(self._path(i) for i in paths)\n\n def _get_hash(self, path):\n hash_ = _get_ast_hash(path)\n for confpath in filter(lambda k: k in path, sorted(self.conf_hashes)):\n hash_ += self.conf_hashes[confpath]\n return sha1(hash_.encode()).hexdigest()\n\n def check_updated(self, path):\n path = self._path(path)\n if path not in self.tests or not self.tests[path][\"isolated\"]:\n return False\n if ARGV[\"coverage\"] and not self.tests[path][\"coverage\"]:\n return False\n for txhash in self.tests[path][\"txhash\"]:\n coverage._check_cached(txhash, False)\n return True\n\n def module_completed(self, path):\n path = self._path(path)\n isolated = False\n if path in self.isolated:\n isolated = [i for i in _get_current_dependencies() if i in self.contracts]\n txhash = coverage._get_active_txlist()\n coverage._clear_active_txlist()\n if not ARGV[\"coverage\"] and (path in self.tests and self.tests[path][\"coverage\"]):\n txhash = self.tests[path][\"txhash\"]\n self.tests[path] = {\n \"sha1\": self._get_hash(path),\n \"isolated\": isolated,\n \"coverage\": ARGV[\"coverage\"] or (path in self.tests and self.tests[path][\"coverage\"]),\n \"txhash\": txhash,\n \"results\": \"\".join(self.results),\n }\n\n def save_json(self):\n txhash = set(x for v in self.tests.values() for x in v[\"txhash\"])\n coverage_eval = dict((k, v) for k, v in coverage.get_coverage_eval().items() if k in txhash)\n report = {\"tests\": self.tests, \"contracts\": self.contracts, \"tx\": coverage_eval}\n with self.project_path.joinpath(\"build/tests.json\").open(\"w\") as fp:\n json.dump(report, fp, indent=2, sort_keys=True, default=sorted)\n\n def set_active(self, path):\n path = self._path(path)\n if path == self.active_path:\n self.count += 1\n return\n self.active_path = path\n self.count = 0\n if path in self.tests and ARGV[\"update\"]:\n self.results = list(self.tests[path][\"results\"])\n else:\n self.results = []\n\n def check_status(self, report):\n if report.when == \"setup\":\n self._skip = report.skipped\n if len(self.results) < self.count + 1:\n self.results.append(\"s\" if report.skipped else None)\n if report.failed:\n self.results[self.count] = \"E\"\n return \"error\", \"E\", \"ERROR\"\n return \"\", \"\", \"\"\n if report.when == \"teardown\":\n if report.failed:\n self.results[self.count] = \"E\"\n return \"error\", \"E\", \"ERROR\"\n elif self._skip:\n report.outcome = STATUS_TYPES[self.results[self.count]]\n return \"skipped\", \"s\", \"SKIPPED\"\n return \"\", \"\", \"\"\n if hasattr(report, \"wasxfail\"):\n self.results[self.count] = \"x\" if report.skipped else \"X\"\n if report.skipped:\n return \"xfailed\", \"x\", \"XFAIL\"\n elif report.passed:\n return \"xpassed\", \"X\", \"XPASS\"\n self.results[self.count] = 
STATUS_SYMBOLS[report.outcome]\n return report.outcome, STATUS_SYMBOLS[report.outcome], report.outcome.upper()\n", "sub_path": "brownie/test/_manager.py", "file_name": "_manager.py", "file_ext": "py", "file_size_in_byte": 5705, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "brownie.project.scripts._get_ast_hash", "line_number": 33, "usage_type": "call"}, {"api_name": "json.load", "line_number": 36, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 43, "usage_type": "call"}, {"api_name": "brownie.test.coverage._add_cached_transaction", "line_number": 55, "usage_type": "call"}, {"api_name": "brownie.test.coverage", "line_number": 55, "usage_type": "name"}, {"api_name": "brownie.test.coverage._add_cached_transaction", "line_number": 63, "usage_type": "call"}, {"api_name": "brownie.test.coverage", "line_number": 63, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 66, "usage_type": "call"}, {"api_name": "brownie.project.scripts._get_ast_hash", "line_number": 72, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 75, "usage_type": "call"}, {"api_name": "brownie._config.ARGV", "line_number": 81, "usage_type": "name"}, {"api_name": "brownie.test.coverage._check_cached", "line_number": 84, "usage_type": "call"}, {"api_name": "brownie.test.coverage", "line_number": 84, "usage_type": "name"}, {"api_name": "brownie.network.state._get_current_dependencies", "line_number": 91, "usage_type": "call"}, {"api_name": "brownie.test.coverage._get_active_txlist", "line_number": 92, "usage_type": "call"}, {"api_name": "brownie.test.coverage", "line_number": 92, "usage_type": "name"}, {"api_name": "brownie.test.coverage._clear_active_txlist", "line_number": 93, "usage_type": "call"}, {"api_name": "brownie.test.coverage", "line_number": 93, "usage_type": "name"}, {"api_name": "brownie._config.ARGV", "line_number": 94, "usage_type": "name"}, {"api_name": "brownie._config.ARGV", "line_number": 99, "usage_type": "name"}, {"api_name": "brownie.test.coverage.get_coverage_eval", "line_number": 106, "usage_type": "call"}, {"api_name": "brownie.test.coverage", "line_number": 106, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 109, "usage_type": "call"}, {"api_name": "brownie._config.ARGV", "line_number": 118, "usage_type": "name"}]} +{"seq_id": "494189296", "text": "# -*- encoding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Python standard library\nimport operator\nimport os\nimport re\nfrom copy import copy\n# Backport needed if Python 2 is used\nfrom enum import IntEnum\nfrom fractions import Fraction\nfrom functools import reduce\nfrom functools import total_ordering\n\n# External dependencies\nimport attr\nimport pandas as pd\n\nfrom ..data import get_latex\nfrom ..data import get_mass_width\nfrom ..data import get_special\n\n# The path of this file (used to load data files)\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n\ndef programmatic_name(name):\n 'Return a name safe to use as a variable name'\n return (name.replace('(', '').replace(')', '')\n .replace('*', '').replace('::', '_')\n .replace('-', 'm').replace('+', 'p')\n .replace('~', 'bar'))\n\n\ngetname = re.compile(r'''\n^ # Beginning of string\n (?P \\w+? 
) # One or more characters, non-greedy\n(?:\\( (?P \\d+ ) \\) # Optional state in ()\n (?= \\*? \\( ) )? # - lookahead for mass\n (?P \\* )? # Optional star\n(?:\\( (?P \\d+ ) \\) )? # Optional mass in ()\n (?P bar )? # Optional bar\n (?P [0\\+\\-][+-]?) # Required 0, -, --, or +, ++\n$ # End of string\n''', re.VERBOSE)\n\n\nclass SpinType(IntEnum):\n 'The spin type of a particle'\n Scalar = 1 # (0, 1)\n PseudoScalar = -1 # (0,-1)\n Vector = 2 # (1,-1)\n Axial = -2 # (1, 1)\n Tensor = 3 # (2, 1)\n PseudoTensor = -3 # (2,-1)\n Unknown = 0 # (0, 0)\n\n\nclass Par(IntEnum):\n 'Represents parity or charge'\n pp = 2\n p = 1\n o = 0\n m = -1\n mm = -2\n u = 5\n\n\nCharge = Par\n\n\nclass Inv(IntEnum):\n 'Definition of what happens when particle is inverted'\n Same = 0\n Full = 1\n Barless = 2\n\n\nclass Status(IntEnum):\n 'The status of the particle'\n Common = 0\n Rare = 1\n Unsure = 2\n Further = 3\n Nonexistant = 4\n\n\n# Mappings that allow the above classes to be produced from text mappings\nPar_mapping = {'+': Par.p, '0': Par.o, '+2/3': Par.u,\n '++': Par.pp, '-': Par.m, '-1/3': Par.u, '?': Par.u, '': Par.o}\nInv_mapping = {'': Inv.Same, 'F': Inv.Full, 'B': Inv.Barless}\nStatus_mapping = {'R': Status.Common, 'D': Status.Rare, 'S': Status.Unsure, 'F': Status.Further}\n\n# Mappings that allow the above classes to be turned into text mappings\nPar_undo = {Par.pp: '++', Par.p: '+', Par.o: '0', Par.m: '-', Par.mm: '--', Par.u: '?'}\nPar_prog = {Par.pp: 'pp', Par.p: 'p', Par.o: '0', Par.m: 'm', Par.mm: 'mm', Par.u: 'u'}\n\n\ndef get_from_latex(filename):\n \"\"\"\n Produce a pandas series from a file with latex mappings in itself.\n The file format is the following: PDGID ParticleLatexName AntiparticleLatexName.\n \"\"\"\n latex_table = pd.read_csv(filename, index_col=0)\n series_real = latex_table.particle\n series_anti = latex_table.antiparticle\n series_anti.index = -series_anti.index\n return pd.concat([series_real, series_anti])\n\n\ndef get_from_pdg(filename, latexes=None):\n 'Read a file, plus a list of latex files, to produce a pandas DataFrame with particle information'\n\n def unmap(mapping):\n return lambda x: mapping[x.strip()]\n\n # Convert each column from text to appropriate data type\n PDG_converters = dict(\n Charge=unmap(Par_mapping),\n G=unmap(Par_mapping),\n P=unmap(Par_mapping),\n C=unmap(Par_mapping),\n A=unmap(Inv_mapping),\n Rank=lambda x: int(x.strip()) if x.strip() else 0,\n ID=lambda x: int(x.strip()) if x.strip() else -1,\n Status=unmap(Status_mapping),\n Name=lambda x: x.strip(),\n I=lambda x: x.strip(), # noqa: E741\n J=lambda x: x.strip(),\n Quarks=lambda x: x.strip()\n )\n\n # Read in the table, apply the converters, add names, ignore comments\n pdg_table = pd.read_csv(filename, comment='*', names='Mass,MassUpper,MassLower,Width,WidthUpper,WidthLower,I,G,J,P,C,A,'\n 'ID,Charge,Rank,Status,Name,Quarks'.split(','),\n converters=PDG_converters\n )\n\n # Filtering out non-particles (quarks, negative IDs)\n pdg_table = pdg_table[pdg_table.Charge != Par.u]\n pdg_table = pdg_table[pdg_table.ID >= 0]\n\n # PDG's ID should be the key to table\n pdg_table.set_index('ID', inplace=True)\n\n # Some post processing to produce inverted particles\n pdg_table_inv = pdg_table[(pdg_table.A == Inv.Full)\n | ((pdg_table.A == Inv.Barless)\n # Maybe add? 
& (pdg_table.Charge != Par.u)\n & (pdg_table.Charge != Par.o))].copy()\n pdg_table_inv.index = -pdg_table_inv.index\n pdg_table_inv.loc[(pdg_table_inv.A != Inv.Same) & (\n pdg_table_inv.Charge != Par.u), 'Charge'] *= -1\n pdg_table_inv.Quarks = (pdg_table_inv.Quarks.str.swapcase()\n .str.replace('SQRT', 'sqrt')\n .str.replace('P', 'p').str.replace('Q', 'q')\n .str.replace('mAYBE NON', 'Maybe non')\n .str.replace('X', 'x').str.replace('Y', 'y'))\n\n # Make a combined table with + and - ID numbers\n full = pd.concat([pdg_table, pdg_table_inv])\n\n # Add the latex\n if latexes is None:\n latexes = (get_latex(),)\n latex_series = pd.concat([get_from_latex(latex) for latex in latexes])\n full = full.assign(Latex=latex_series)\n\n # Return the table, making sure NaNs are just empty strings\n return full.fillna('')\n\n\ndef mkul(upper, lower):\n 'Utility to print out an uncertainty with different or identical upper/lower bounds'\n if upper == lower:\n if upper == 0:\n return ''\n else:\n return '± {upper:g}'.format(upper=upper)\n else:\n return '+ {upper:g} - {lower:g}'.format(upper=upper, lower=lower)\n\n\n@total_ordering\n@attr.s(slots=True, cmp=False)\nclass Particle(object):\n 'The Particle object class. Hold a series of properties for a particle.'\n val = attr.ib()\n name = attr.ib()\n mass = attr.ib()\n width = attr.ib()\n charge = attr.ib()\n A = attr.ib() # Info about particle name for anti-particles\n rank = attr.ib(0) # Next line is Isospin\n I = attr.ib(None) # noqa: E741\n J = attr.ib(None) # Total angular momentum\n G = attr.ib(Par.u) # Parity: '', +, -, or ?\n P = attr.ib(Par.u) # Space parity\n C = attr.ib(Par.u) # Charge conjugation parity\n # (B (just charge), F (add bar) , and '' (No change))\n quarks = attr.ib('')\n status = attr.ib(Status.Nonexistant)\n latex = attr.ib('')\n mass_upper = attr.ib(0.0)\n mass_lower = attr.ib(0.0)\n width_upper = attr.ib(0.0)\n width_lower = attr.ib(0.0)\n\n # Make a class level property that holds the PDG table. Loads on first access (via method)\n _pdg_table = None\n\n @classmethod\n def load_pdg_table(cls, files=None, latexes=None):\n 'Load a PDG table. Will be called on first access to the PDG table'\n if files is None:\n files = (get_mass_width(), get_special())\n tables = [get_from_pdg(f, latexes) for f in files]\n cls._pdg_table = pd.concat(tables)\n\n @classmethod\n def pdg_table(cls):\n 'Get the PDG table. 
Loads on first access.'\n if cls._pdg_table is None:\n cls.load_pdg_table()\n return cls._pdg_table\n\n # The following __le__ and __eq__ needed for total ordering (sort, etc)\n\n def __le__(self, other):\n # Sort by absolute particle numbers\n # The positive one should come first\n return abs(self.val - .25) < abs(other.val - .25)\n\n def __eq__(self, other):\n return self.val == other.val\n\n def __hash__(self):\n return hash(self.val)\n\n @property\n def radius(self):\n 'Particle radius, hard coded'\n if abs(self.val) in [411, 421, 431]:\n return 5\n else:\n return 1.5\n\n @property\n def bar(self):\n 'Check to see if particle is inverted'\n return self.val < 0 and self.A == Inv.Full\n\n @property\n def spin_type(self): # -> SpinType:\n 'Access the SpinType enum'\n if self.J in [0, 1, 2]:\n J = int(self.J)\n\n if self.P == Par.p:\n return (SpinType.Scalar, SpinType.Axial, SpinType.Tensor)[J]\n elif self.P == Par.m:\n return (SpinType.PseudoScalar, SpinType.Vector, SpinType.PseudoTensor)[J]\n\n return SpinType.Unknown\n\n def invert(self):\n \"Get the antiparticle\"\n other = copy(self)\n if self.A == Inv.Full or (self.A == Inv.Barless and self.charge != Par.o):\n other.val = -self.val\n\n if self.charge != Par.u:\n other.charge = -self.charge\n\n try:\n other.quarks = (self.quarks.swapcase()\n .replace('SQRT', 'sqrt')\n .replace('P', 'p').replace('Q', 'q')\n .replace('mAYBE NON', 'Maybe non')\n .replace('X', 'x').replace('Y', 'y'))\n except AttributeError:\n pass\n return other\n\n # Pretty descriptions\n\n def __str__(self):\n return self.name + ('~' if self.A == Inv.Full and self.val < 0 else '') + Par_undo[self.charge]\n\n def _repr_latex_(self):\n name = self.latex\n if self.bar:\n name = re.sub(r'^(\\\\mathrm{|)([\\w\\\\]\\w*)', r'\\1\\\\bar{\\2}', name)\n return (\"$\" + name + '$') if self.latex else '?'\n\n def describe(self):\n 'Make a nice high-density string for a particle\\'s properties.'\n if self.val == 0:\n return \"Name: Unknown\"\n\n val = \"\"\"Name: {self.name:<10} ID: {self.val:<12} Fullname: {self!s:<14} Latex: {self._repr_latex_()}\n Mass = {self.mass!s:<10} {mass} GeV\n Width = {self.width!s:<10} {width} GeV\n I (isospin) = {self.I!s:<6} G (parity) = {Par_undo[self.G]:<5} Q (charge) = {Par_undo[self.charge]}\n J (total angular) = {self.J!s:<6} C (charge parity) = {Par_undo[self.C]:<5} P (space parity) = {Par_undo[self.P]}\n\"\"\".format(self=self, Par_undo=Par_undo, mass=mkul(self.mass_upper, self.mass_lower), width=mkul(self.width_upper, self.width_lower))\n\n if self.spin_type != SpinType.Unknown:\n val += \" SpinType: {self.spin_type!s}\\n\".format(self=self)\n if self.quarks:\n val += \" Quarks: {self.quarks}\\n\".format(self=self)\n val += \" Antiparticle status: {self.A.name}\\n\".format(self=self)\n val += \" Radius: {self.radius} GeV\".format(self=self)\n return val\n\n @property\n def programmatic_name(self):\n 'This name could be used for a variable name'\n name = self.name\n name += '_' + Par_prog[self.charge]\n return programmatic_name(name)\n\n @property\n def html_name(self):\n 'This is the name using HTML instead of LaTeX'\n name = self.latex\n name = re.sub(r'\\^\\{(.*?)\\}', r'\\1', name)\n name = re.sub(r'\\_\\{(.*?)\\}', r'\\1', name)\n name = re.sub(r'\\\\mathrm\\{(.*?)\\}', r'\\1', name)\n name = re.sub(r'\\\\left\\[(.*?)\\\\right\\]', r'[\\1] ', name)\n name = name.replace(r'\\pi', 'π').replace(r'\\rho', 'ρ').replace(r'\\omega', 'ω')\n if self.bar:\n name += '~'\n return name\n\n @classmethod\n def empty(cls):\n 'Get a new empty particle'\n 
return cls(0, 'Unknown', 0., 0., 0, Inv.Same)\n\n @classmethod\n def from_pdgid(cls, val):\n 'Get a particle from a PDGID.'\n if val == 0:\n return cls.empty()\n else:\n col = cls.pdg_table().loc[val]\n J = Fraction(col.J) if col.J not in {'2or4', '?'} else col.J\n I = Fraction(col.I) if col.I not in {'', '<2', '?'} else col.I # noqa: 741\n name = col.Name\n if abs(val) == 313:\n name += '(892)'\n return cls(val, name, col.Mass/1000, col.Width/1000, Par(col.Charge), Inv(col.A),\n col.Rank,\n I, J,\n Par(col.G), Par(col.P), Par(col.C),\n col.Quarks, Status(col.Status),\n latex=col.Latex,\n mass_upper=col.MassUpper/1000,\n mass_lower=col.MassLower/1000,\n width_upper=col.WidthUpper/1000,\n width_lower=col.WidthLower/1000,)\n\n @classmethod\n def from_search_list(cls, name=None, latex=None, name_re=None, latex_re=None, particle=None, **search_terms):\n 'Search for a particle, returning a list of candidates'\n\n for term in list(search_terms):\n if search_terms[term] is None:\n del search_terms[term]\n\n # If J or I is passed, make sure it is a string\n if not isinstance(search_terms.get('J', ''), str):\n search_terms['J'] = str(search_terms['J'])\n if not isinstance(search_terms.get('J', ''), str):\n search_terms['I'] = str(search_terms['I'])\n\n bools = [cls.pdg_table()[term] == match for term, match in search_terms.items()]\n\n if name is not None:\n bools.append(cls.pdg_table().Name.str.contains(str(name), regex=False))\n if name_re is not None:\n bools.append(cls.pdg_table().Name.str.contains(name_re, regex=True))\n if latex is not None:\n bools.append(cls.pdg_table().Latex.str.contains(str(latex), regex=False))\n if latex_re is not None:\n bools.append(cls.pdg_table().Latex.str.contains(latex_re, regex=True))\n if particle is not None:\n bools.append(cls.pdg_table().index > 0 if particle else cls.pdg_table().index < 0)\n\n results = cls.pdg_table()[reduce(operator.and_, bools)]\n return [cls.from_pdgid(r) for r in results.index]\n\n @classmethod\n def from_search(cls, name=None, latex=None, name_re=None, latex_re=None, **search_terms):\n 'Require that your each returns one and only one result'\n results = cls.from_search_list(name, latex, name_re=name_re,\n latex_re=latex_re, **search_terms)\n if len(results) == 1:\n return results[0]\n elif len(results) == 0:\n raise RuntimeError('Did not find particle')\n else:\n raise RuntimeError(\"Found too many particles\")\n\n @classmethod\n def from_AmpGen(cls, name):\n 'Get a particle from an AmpGen style name'\n mat = getname.match(name)\n mat = mat.groupdict()\n\n Par_mapping = {'++': 2, '+': 1, '0': 0, '-': -1, '--': 2}\n particle = False if mat['bar'] is not None else (True if mat['charge'] == '0' else None)\n\n fullname = mat['name']\n if mat['state']:\n fullname += '({mat[state]})'.format(mat=mat)\n\n if mat['mass']:\n maxname = fullname + '({mat[mass]})'.format(mat=mat)\n else:\n maxname = fullname\n\n vals = cls.from_search_list(Name=maxname,\n Charge=Par_mapping[mat['charge']],\n particle=particle,\n J=mat['state'])\n if not vals:\n vals = cls.from_search_list(Name=fullname,\n Charge=Par_mapping[mat['charge']],\n particle=particle,\n J=mat['state'])\n\n if len(vals) > 1 and mat['mass'] is not None:\n vals = [val for val in vals if mat['mass'] in val.latex]\n\n if len(vals) > 1:\n vals = sorted(vals)\n\n return vals[0]\n", "sub_path": "decaylanguage/particle/particle.py", "file_name": "particle.py", "file_ext": "py", "file_size_in_byte": 15992, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "18", "api": [{"api_name": "os.path.dirname", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 27, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 38, "usage_type": "call"}, {"api_name": "re.VERBOSE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "enum.IntEnum", "line_number": 51, "usage_type": "name"}, {"api_name": "enum.IntEnum", "line_number": 62, "usage_type": "name"}, {"api_name": "enum.IntEnum", "line_number": 75, "usage_type": "name"}, {"api_name": "enum.IntEnum", "line_number": 82, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 111, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 137, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 164, "usage_type": "call"}, {"api_name": "data.get_latex", "line_number": 168, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 169, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 191, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 192, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 193, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 194, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 195, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 196, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 197, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 198, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 199, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 200, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 201, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 202, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 204, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 205, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 206, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 207, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 208, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 209, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 210, "usage_type": "call"}, {"api_name": "data.get_mass_width", "line_number": 219, "usage_type": "call"}, {"api_name": "data.get_special", "line_number": 219, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 221, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 271, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 296, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 330, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 331, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 332, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 333, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 351, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 352, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 394, "usage_type": "call"}, {"api_name": "operator.and_", "line_number": 394, "usage_type": "attribute"}, {"api_name": "functools.total_ordering", "line_number": 187, "usage_type": "name"}, {"api_name": "attr.s", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "9392392", "text": "from nltk.tokenize import TextTilingTokenizer\nfrom 
nltk.tokenize import sent_tokenize\nfrom bs4 import BeautifulSoup\n\nimport re\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass DocumentTokenizer:\n \"\"\"\n Initialize with option (str) specifying what tokenizer to use:\n self.tokenizer(id, contents) returns JSON array of tokenized contents\n \"\"\"\n def __init__(self, option):\n self.tokenizer = getattr(self, option)\n \n ## Try different tokenizing logic here\n \n ## 1) Use with raw=False, i.e. the parsed FIELD_BODY from lucene Documents\n \n def text_sentencer(self, id, contents):\n # pass in transformed content from document for now\n # using raw content may require different parsing for\n # different document types, e.g. html vs. xml\n results = []\n try:\n tokens = sent_tokenize(contents)\n if (len(tokens) > 100000):\n logger.error(str(len(tokens)) + \" tiles from document \" + str(id))\n \n for (i, tile) in enumerate(tokens):\n tile_id = '{}.{:06d}'.format(id, i)\n results.append({'id': tile_id, 'contents': tile})\n except:\n # error tokenizing, write as one tile\n logger.error(\"error tokenizing, write as one tile\")\n results.append({'id':'{}.{:06d}'.format(id, 0), \n 'contents': contents})\n \n return results\n ", "sub_path": "src/main/python/passage_retrieval/document_tokenizer.py", "file_name": "document_tokenizer.py", "file_ext": "py", "file_size_in_byte": 1481, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "nltk.tokenize.sent_tokenize", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "606758379", "text": "'''Single node, multi-GPUs training.'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.distributed as dist\n\nimport argparse\nimport torchvision\nimport torchvision.transforms as transforms\n\nfrom models import *\nfrom utils import progress_bar\n\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Distributed Training')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--local_rank', default=0, type=int, help='current process rank')\nparser.add_argument('--world_size', default=1, type=int, help='total number of ranks')\nargs = parser.parse_args()\n\n\n# Init process group\nprint(\"Initialize Process Group...\")\ndist.init_process_group(backend='nccl', init_method='tcp://localhost:23456',\n rank=args.local_rank, world_size=args.world_size)\ntorch.cuda.set_device(args.local_rank)\n\n# Init Model\nprint(\"Initialize Model...\")\nnet = EfficientNetB0().cuda()\nnet = nn.SyncBatchNorm.convert_sync_batchnorm(net)\nnet = torch.nn.parallel.DistributedDataParallel(\n net, device_ids=[args.local_rank], output_device=args.local_rank)\n\ncriterion = nn.CrossEntropyLoss().cuda()\noptimizer = torch.optim.SGD(net.parameters(), 1e-3, momentum=0.9, weight_decay=1e-4)\n\n# Data\nprint('==> Preparing data..')\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n])\n\ntrainset = torchvision.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform_train)\ntrainsampler = torch.utils.data.distributed.DistributedSampler(trainset)\ntrainloader = 
torch.utils.data.DataLoader(\n trainset, batch_size=128, shuffle=(trainsampler is None), num_workers=2, pin_memory=True, sampler=trainsampler)\n\ntestset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=True, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(\n testset, batch_size=100, shuffle=False, num_workers=2, pin_memory=False)\n\n\ndef train(epoch):\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs, targets = inputs.cuda(), targets.cuda()\n outputs = net(inputs)\n loss = F.cross_entropy(outputs, targets)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n\ndef test(epoch):\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.cuda(), targets.cuda()\n outputs = net(inputs)\n loss = F.cross_entropy(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n\nfor epoch in range(200):\n trainsampler.set_epoch(epoch)\n train(epoch)\n test(epoch)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3747, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.distributed.init_process_group", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.cuda.set_device", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.SyncBatchNorm", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 40, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 40, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomCrop", "line_number": 41, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 42, "usage_type": "name"}, {"api_name": 
"torchvision.transforms.ToTensor", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 44, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 47, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 47, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 48, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 48, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 49, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 52, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 52, "usage_type": "attribute"}, {"api_name": "torch.utils.data.distributed.DistributedSampler", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 58, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 73, "usage_type": "name"}, {"api_name": "utils.progress_bar", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 96, "usage_type": "name"}, {"api_name": "utils.progress_bar", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "245254948", "text": "#get_info_list --- push_it\r\n\r\nfrom datetime import datetime\r\nimport requests\r\n\r\ndef get_info_list():\r\n api = 'https://api.github.com/search/repositories?q='\r\n query = 'topic:crawler+language:python+'\r\n when = 'created:' + str(datetime.now()).split()[0]\r\n full_url = api + query + when\r\n r = requests.get(full_url)\r\n return r.json()['item']\r\n\r\ndef make_message(repo_info):\r\n title = repo_info['name']\r\n url = repo_info['html_url']\r\n message = repo_info['description']\r\n token = 'xxx' #这里需要换成你的 token\r\n user = 'xxx' #这里需要换成你的 pushover user id\r\n api = 'https://api.pushover.net/1/messages.json?'\r\n template = 'token={token}&user={user}&message={msg}&title={t}&url={url}'\r\n query = template.format(\r\n token = token,\r\n user = user,\r\n msg = message,\r\n t = title,\r\n url = url\r\n )\r\n full_url = api + query\r\n return full_url\r\n\r\ndef push_it(message):\r\n requests.post(message)\r\n print('Done')\r\n\r\ninfo_list = get_info_list()\r\nfor info in info_list:\r\n message = make_message(info)\r\n push_it(message)", "sub_path": "day6.py", "file_name": "day6.py", "file_ext": "py", "file_size_in_byte": 1118, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", 
"api": [{"api_name": "datetime.datetime.now", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "395697261", "text": "\"\"\" Module \"\"\"\nimport glob\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nfrom matplotlib.lines import Line2D\n\n# on linux\nrcParams['font.family'] = \"DejaVu Sans Mono\"\n# on windows\n# rcParams['font.family'] = \"monospace\"\n\n\nplt.rcParams.update({\n \"lines.color\": \"white\",\n \"patch.edgecolor\": \"white\",\n \"text.color\": \"white\",\n \"axes.facecolor\": \"black\",\n \"axes.edgecolor\": \"white\",\n \"axes.labelcolor\": \"white\",\n \"xtick.color\": \"goldenrod\",\n \"ytick.color\": \"goldenrod\",\n \"grid.color\": \"black\",\n \"figure.facecolor\": \"black\",\n \"figure.edgecolor\": \"black\",\n \"savefig.facecolor\": \"black\",\n \"font.monospace\": \"monospace\",\n \"savefig.edgecolor\": \"black\",\n \"figure.max_open_warning\": 0})\n\n\nclass FstData:\n def __init__(self, fstfile, chromosome, simulated=False):\n self.data = None\n self.simulated = False\n if not simulated:\n with open(fstfile) as f:\n self.data = [line.rstrip('\\n') for line in f]\n else:\n self.simulated = True\n with open(fstfile) as f:\n self.data = [line.rstrip('\\n') for line in f if not line.startswith('region')]\n self.pos1 = [int(line.split(',')[0].split('-')[0]) for line in self.data]\n self.pos2 = [int(line.split(',')[0].split('-')[1]) for line in self.data]\n self.startpos = self.pos1[0]\n self.endpos = self.pos2[-1]\n self.region = '{}:{}-{}'.format(chromosome, str(self.startpos), str(self.endpos))\n self.dict = None\n\n def dictionary(self):\n self.dict = {idx: None for idx, col in enumerate(self.data[0].split(','))}\n del self.dict[0]\n for key in self.dict.keys():\n fst_list = list()\n for line in self.data:\n fst_list.append(round(float(line.split(',')[key]), 4))\n self.dict[key] = fst_list\n\n def max(self, maxval):\n tracker = maxval\n for key in self.dict.keys():\n freqs_check = self.dict[key]\n new_max = round(max(freqs_check), 2)\n if new_max > tracker:\n tracker = new_max\n return tracker\n\n\nclass FstClass:\n\n def __init__(self, chromosome):\n \"\"\"Gets the list of positions from the file\"\"\"\n self.chromosome = chromosome\n self.expdat1A_ud = 'Exp_Up1A_Dwn1A_Fst.dat'\n self.expdat2A_ud = 'Exp_Up2A_Dwn2A_Fst.dat'\n # self.expdatUpB1 = 'Exp_Up1B_CtrlB_Fst.dat'\n # self.expdatUpB2 = 'Exp_Up2B_CtrlB_Fst.dat'\n self.expdat1B_ud = 'Exp_Up1B_Dwn1B_Fst.dat'\n self.expdat2B_ud = 'Exp_Up2B_Dwn2B_Fst.dat'\n # self.expdatDwnB1 = 'Exp_Dwn1B_CtrlB_Fst.dat'\n # self.expdatDwnB2 = 'Exp_Dwn2B_CtrlB_Fst.dat'\n self.cdat = 'Exp_CtrlA_CtrlB_Fst.dat'\n self.simdat = glob.glob('*_Simulation_Fst.dat')[0]\n self.region = None\n self.expdat1A_udobj = FstData(self.expdat1A_ud, self.chromosome)\n self.expdat2A_udobj = FstData(self.expdat2A_ud, self.chromosome)\n # self.expdatUpB1obj = FstData(self.expdatUpB1, self.chromosome)\n # self.expdatUpB2obj = FstData(self.expdatUpB2, self.chromosome)\n self.expdat1B_udobj = FstData(self.expdat1B_ud, self.chromosome)\n self.expdat2B_udobj = FstData(self.expdat2B_ud, self.chromosome)\n # self.expdatDwnB1obj = FstData(self.expdatDwnB1, self.chromosome)\n # self.expdatDwnB2obj = FstData(self.expdatDwnB2, self.chromosome)\n self.cdatobj = FstData(self.cdat, self.chromosome)\n self.simdatobj = 
FstData(self.simdat, self.chromosome, simulated=True)\n\n if self.expdat1A_udobj.region == self.expdat2A_udobj.region:\n if self.expdat1B_udobj.region == self.expdat2B_udobj.region:\n self.region = self.expdat1A_udobj.region\n else:\n print(\"Problem\")\n quit()\n else:\n print('Problem')\n quit()\n self.expdat1A_udobj.dictionary()\n self.expdat2A_udobj.dictionary()\n # self.expdatUpB1obj.dictionary()\n # self.expdatUpB2obj.dictionary()\n self.expdat1B_udobj.dictionary()\n self.expdat2B_udobj.dictionary()\n # self.expdatDwnB1obj.dictionary()\n # self.expdatDwnB2obj.dictionary()\n self.cdatobj.dictionary()\n self.simdatobj.dictionary()\n\n self.range_list = list()\n for a, b in zip(self.expdat1A_udobj.pos1, self.expdat1A_udobj.pos2):\n # subtract 1, so that range\n self.range_list.append(range(a, b))\n self.outputfile = 'Fst_data.png'\n # self.title = 'Comparing Up & Down Haplotype Frequencies (1Kb windows)'\n self.x_label = 'Genomic Coordinate: Chromosome Arm {}'.format(self.chromosome)\n # TODO: need to add variance to Fst when calculating so you can make error bars in graph (possibly)\n self.y_label = 'Fst (Between Haplotype Frequency Windows)'\n self.fig = None\n self.ax = None\n self.ymax = None\n self.simcolormap = ['honeydew']\n self.expdatAcolormap = ['orangered']\n self.expdatBcolormap = ['mediumslateblue']\n # self.expdatDwnAcolormap = ['orange']\n # self.expdatDwnBcolormap = ['mediumorchid']\n self.ctrlcolormap = ['yellow']\n # self.expdatBcolormap = ['pink']\n # self.expdatDwnAcolormap = ['chartreuse']\n # self.expdatDwnBcolormap = ['yellowgreen']\n\n def find_ymax(self):\n max_val = self.expdat1A_udobj.max(0)\n max_val = self.expdat2A_udobj.max(max_val)\n # max_val = self.expdatUpB1obj.max(max_val)\n # max_val = self.expdatUpB2obj.max(max_val)\n max_val = self.expdat1B_udobj.max(max_val)\n max_val = self.expdat2B_udobj.max(max_val)\n # max_val = self.expdatDwnB1obj.max(max_val)\n # max_val = self.expdatDwnB2obj.max(max_val)\n max_val = self.cdatobj.max(max_val)\n max_val = self.simdatobj.max(max_val)\n self.ymax = round(max_val, 2)\n # print(str(self.ymax))\n\n def easy_ymax(self):\n self.ymax = 0.99\n\n def plotfst(self, axis, datadict, coloriter, xlimit, alpha_val, linetype='-', linethick=3.5):\n for key, color_iterator in zip(datadict.keys(), coloriter):\n y_data = list()\n for rng, freq in zip(self.range_list, datadict[key]):\n y_data.extend([freq for _ in range(1, 1000)])\n axis.plot(range(1, xlimit + 1), y_data, color=color_iterator, linestyle=linetype, alpha=alpha_val,\n linewidth=linethick)\n\n def simfst(self, axis, datadict, color_iterator, xlimit, alpha_val, linetype='-'):\n for key in range(1, 3):\n y_data = list()\n e_data = list()\n # err_key = key + 3\n err_list = datadict[key + 2]\n # TODO: sample size will change\n errlst = [1.96*(math.sqrt(x)/math.sqrt(40)) for x in err_list]\n for rng, freq, er in zip(self.range_list, datadict[key], errlst):\n y_data.extend([freq for _ in range(1, 1000)])\n e_data.extend([(freq + er, freq - er) for _ in range(1, 1000)])\n e_data0 = [tup[0] for tup in e_data]\n e_data1 = [tup[1] for tup in e_data]\n axis.plot(range(1, xlimit + 1), y_data, color=color_iterator[0], linestyle=linetype, alpha=alpha_val,\n linewidth=3.5)\n axis.fill_between(range(1, xlimit + 1), e_data0, e_data1, color=color_iterator, alpha=alpha_val)\n\n def plot(self):\n self.fig, self.ax = plt.subplots(nrows=2, ncols=1, figsize=(30, 24))\n self.ax[0].set_ylim([0, self.ymax + 0.05])\n self.ax[1].set_ylim([0, self.ymax + 0.05])\n\n xlim = len(range(1, 1000)) * 
len(self.range_list)\n self.ax[0].set_xlim([1, xlim])\n self.ax[1].set_xlim([1, xlim])\n # self.ax.set_title(self.title, fontsize=20)\n self.plotfst(self.ax[0], self.expdat1A_udobj.dict, self.expdatAcolormap, xlim, 1.0)\n self.plotfst(self.ax[0], self.expdat2A_udobj.dict, self.expdatAcolormap, xlim, 0.5)\n # self.plotfst(self.ax[0], self.expdatUpB1obj.dict, self.expdatBcolormap, xlim, 1.0)\n # self.plotfst(self.ax[0], self.expdatUpB2obj.dict, self.expdatBcolormap, xlim, 0.5)\n self.plotfst(self.ax[1], self.expdat1B_udobj.dict, self.expdatBcolormap, xlim, 1.0)\n self.plotfst(self.ax[1], self.expdat2B_udobj.dict, self.expdatBcolormap, xlim, 0.5)\n # self.plotfst(self.ax[1], self.expdatDwnB1obj.dict, self.expdatDwnBcolormap, xlim, 1.0)\n # self.plotfst(self.ax[1], self.expdatDwnB2obj.dict, self.expdatDwnBcolormap, xlim, 0.5)\n self.simfst(self.ax[0], self.simdatobj.dict, self.simcolormap, xlim, 0.8, '--')\n self.simfst(self.ax[1], self.simdatobj.dict, self.simcolormap, xlim, 0.8, '--')\n self.plotfst(self.ax[0], self.cdatobj.dict, self.ctrlcolormap, xlim, 0.8, '--', linethick=4)\n self.plotfst(self.ax[1], self.cdatobj.dict, self.ctrlcolormap, xlim, 0.8, '--', linethick=4)\n\n custom_lines1 = [Line2D([0], [0], color='orangered', lw=3.5, label='Up1A v Dwn1A'),\n Line2D([0], [0], color='orangered', lw=3.5, alpha=0.5, label='Up2A v Dwn2A'),\n Line2D([0], [0], color='yellow', lw=4, alpha=0.8, linestyle='--', label='ControlA v '\n 'ControlB'),\n Line2D([0], [0], color='honeydew', lw=3.5, alpha=0.8, label='Simulations')]\n custom_lines2 = [Line2D([0], [0], color='mediumslateblue', lw=3.5, label='Up1B v Dwn1B'),\n Line2D([0], [0], color='mediumslateblue', lw=3.5, alpha=0.5, label='Up2B v Dwn2B'),\n Line2D([0], [0], color='yellow', lw=4, linestyle='--', alpha=0.8, label='ControlA v '\n 'ControlB'),\n Line2D([0], [0], color='honeydew', lw=3.5, alpha=0.8, label='Simulations')]\n # self.ax.set_xticks([idx for idx, s in enumerate(self.positions)])\n xticks = list(range(1, xlim, 1000))\n xticklables = [\"{:,}\".format(x) for x in self.expdat1A_udobj.pos1]\n # yrange = range(0, self.ymax + 0.05, 0)\n # yticks = [0.2, 0.4, 0.6, 0.8]\n # yticklabels = ['0.2', '0.4', '0.6', '0.8']\n legen1 = self.ax[0].legend(fontsize=16, handles=custom_lines1, loc='upper left')\n legen2 = self.ax[1].legend(fontsize=16, handles=custom_lines2, loc='upper left')\n\n plt.setp(legen1.get_texts(), color='w')\n plt.setp(legen2.get_texts(), color='w')\n\n self.ax[0].set_xticks(xticks[0::30])\n self.ax[0].set_xticklabels(xticklables[0::30])\n plt.setp(self.ax[0].get_xticklabels(), fontsize=17)\n plt.setp(self.ax[0].get_yticklabels(), fontsize=17)\n self.ax[0].set_ylabel(self.y_label, fontsize=20)\n self.ax[0].set_xlabel(self.x_label, fontsize=20)\n self.ax[1].set_xticks(xticks[0::30])\n self.ax[1].set_xticklabels(xticklables[0::30])\n plt.setp(self.ax[1].get_xticklabels(), fontsize=17)\n plt.setp(self.ax[1].get_yticklabels(), fontsize=17)\n # self.ax[1].set_ylabel(self.y_label, fontsize=17)\n self.ax[1].set_xlabel(self.x_label, fontsize=20)\n self.ax[1].set_ylabel(self.y_label, fontsize=20)\n\n\nif __name__ == '__main__':\n import os\n contig = '3R'\n # listA = list(range(5, 32, 2))[:-1]\n # listB = list(range(5, 32, 2))[1:]\n x1 = 4200000\n x2 = 32073015\n # x2 = 3\n # for x1, x2 in zip(listA, listB):\n # os.chdir(f'C:\\\\Users\\\\ltjon\\\\Data\\\\Mel2018_Experimental_Haplotype_Graphs\\\\{contig}_{x1}-{x2}\\\\Fst_data')\n os.chdir(f'/home/solid-snake/Data/mel_simulations2018/{contig}_{x1}-{x2}/Fst_data')\n plotobj = 
FstClass(contig)\n plotobj.easy_ymax()\n plotobj.plot()\n # os.chdir(f'C:\\\\Users\\\\ltjon\\\\Data\\\\Mel2018_Experimental_Haplotype_Graphs\\\\{contig}_{x1}-{x2}')\n os.chdir(f'/home/solid-snake/Data/mel_simulations2018/{contig}_{x1}-{x2}')\n plotobj.fig.savefig(f'{contig}_{x1}-{x2}_UpvDown_Fst.png', bbox_inches='tight')\n plt.clf()\n", "sub_path": "graphing/fst_upVdown.py", "file_name": "fst_upVdown.py", "file_ext": "py", "file_size_in_byte": 12165, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "matplotlib.rcParams", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 14, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 83, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 222, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 249, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}]} +{"seq_id": "485917811", "text": "import requests\n\n# resp = requests.get('https://api.punkapi.com/v2/beers')\n# if resp.status_code != 200:\n# # This means something went wrong.\n# raise ApiError('GET /beers/ {}'.format(resp.status_code))\n# for beers in resp.json():\n# print('{} Name:{} ABV:{}'.format(beers['id'], beers['name'],beers['abv']))\n\nbeer = {\"id\": 999, \"name\": \"adri beer\" }\nresp = 
requests.post('https://api.punkapi.com/v2/beers', json=beer)\nif resp.status_code != 201:\n    print(resp.status_code)\nelse: \n    print('Created beer. ID: {}'.format(resp.json()[\"id\"]))", "sub_path": "api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "requests.post", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "282669341", "text": "import os\r\nimport re\r\nimport argparse\r\nimport pandas as pd\r\nimport numpy as np\r\nimport warnings\r\nimport matplotlib.pyplot as plt\r\nimport itertools\r\n\r\ntry:\r\n    from numba import jit, prange\r\nexcept ImportError:\r\n    numba_opt = False\r\nelse:\r\n    numba_opt = True\r\n\r\nnumba_opt = False\r\n\r\ndef get_cmd():\r\n    \"\"\" get command line arguments for data processing\r\n    \"\"\"\r\n    parse = argparse.ArgumentParser()\r\n\r\n    mains = parse.add_argument_group('mains')\r\n    filters = parse.add_argument_group('filters')\r\n    adjustments = parse.add_argument_group('adjustments')\r\n    outputs = parse.add_argument_group('output')\r\n\r\n    # MAIN\r\n    mains.add_argument('-fName', type=str, help='Data file to process', nargs='+')\r\n    mains.add_argument('-fType', type=str, help='Instrument', default='labrecque')\r\n    mains.add_argument('-plot', dest='plot', action='store_true', help='plot information on data filtering')\r\n    mains.add_argument('-elec_coordinates', type=str, help='file with electrode coordinates: x y z columns')\r\n\r\n    # FILTERS\r\n    # voltage\r\n    filters.add_argument('-v', dest='volt', action='store_true', help='perform minimum voltage check')\r\n    filters.add_argument('-v_min', type=float, default=1E-5, help='min voltage value')\r\n    # reciprocal\r\n    filters.add_argument('-rec', dest='rec', action='store_true', help='perform reciprocal check')\r\n    filters.add_argument('-rec_max', type=float, default=10, help='max value for reciprocal error')\r\n    filters.add_argument('-rec_couple', dest='rec_couple', action='store_true', help='couple reciprocal measurements')\r\n    filters.add_argument('-rec_keep_unpaired', dest='rec_keep_unpaired', action='store_true', help='keep measurements without reciprocal')\r\n    # stacking\r\n    filters.add_argument('-stk', dest='stk', action='store_true', help='perform standard deviation check')\r\n    filters.add_argument('-stk_max', type=float, default=10, help='max value from stacking error')\r\n\r\n    # rhoa and k\r\n    filters.add_argument('-k', dest='k', action='store_true', help='perform geometric factor check')\r\n    filters.add_argument('-k_max', dest='k_max', type=float, help='maximum geometric factor', default=500)\r\n    filters.add_argument('-k_file', type=str, help='file containing the geometrical factors') # format a b m n r k ...\r\n    filters.add_argument('-rhoa', dest='rhoa', action='store_true', help='perform rhoa check')\r\n    filters.add_argument('-rhoa_min', type=float, default=2, help='min rhoa value')\r\n    filters.add_argument('-rhoa_max', type=float, default=500, help='max rhoa value')\r\n    # contact resistance\r\n    filters.add_argument('-ctc', dest='ctc', action='store_true', help='perform contact resistance check')\r\n    filters.add_argument('-ctc_max', type=float, default=1E+5) # ohm\r\n\r\n    # OUTPUT\r\n    outputs.add_argument('-wrt_rhoa', dest='wrt_rhoa', action='store_true', help='include rhoa column in the output')\r\n    outputs.add_argument('-wrt_ip', dest='wrt_ip', action='store_true', help='include phase column in the output')\r\n\r\n    # ADJUSTMENTS\r\n    
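    # The three shift_* options below renumber electrodes and measurements in the
    # output tables, e.g. to move between the instrument's numbering and the
    # convention an inversion code expects; the zero defaults leave everything unchanged.
    # Hypothetical example run (file name is illustrative only):
    #   python process.py -fName line1.Data -fType labrecque -rec -rec_max 5 -stk -stk_max 3 -plot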
adjustments.add_argument('-shift_abmn', type=int, default=0, help='shift abmn in data table')\r\n adjustments.add_argument('-shift_meas', type=int, default=0, help='shift measurement number')\r\n adjustments.add_argument('-shift_elec', type=int, default=0, help='shift the electrode number in the electrode table')\r\n # DEFAULTS\r\n parse.set_defaults(stk=False, rec=False, volt=False, rhoa=False, ctc=False, k=False,\r\n rec_couple=False, rec_keep_unpaired=False, wrt_rhoa=False, wrt_ip=False, plot=False)\r\n\r\n args = parse.parse_args()\r\n return(args)\r\n\r\n\r\ndef check_cmd(args):\r\n \"\"\" make sure cmd arguments agree \"\"\"\r\n \r\n print('\\n', '-' * 80)\r\n\r\n if args.wrt_rhoa and not args.rhoa:\r\n raise ValueError('the calculation of rhoa is necessary to add it as a column in the output file, see args.rhoa and args.wrt_rhoa')\r\n\r\n if args.rec_couple and not args.rec:\r\n raise ValueError('processing the reciprocals is necessary to couple them, see args.rec and args.rec_couple')\r\n\r\n if args.fName == 'all':\r\n print('all files in current directory will be processed')\r\n else:\r\n print(args.fName, 'will be processed')\r\n\r\n return(args)\r\n\r\n\r\ndef output_files(fname, extension='.dat'):\r\n \"\"\"define names for the output file and clean them if already exist\"\"\"\r\n fname_noExtension = os.path.splitext(fname)[0]\r\n output_fname = fname_noExtension + extension\r\n try:\r\n os.remove(output_fname)\r\n except:\r\n pass\r\n return(output_fname)\r\n\r\n\r\ndef read_labrecque(FileName=None):\r\n \"\"\" read a labrecque data file an return data and electrode dataframes\"\"\"\r\n\r\n print('reading ', FileName)\r\n\r\n reex = r'[-+]?[.]?[\\d]+[\\.]?\\d*(?:[eE][-+]?\\d+)?'\r\n\r\n ElecAllColumns, DataAllColumns = [], []\r\n AppRes = False\r\n with open(FileName) as fid:\r\n for l in itertools.takewhile(lambda x: 'elec_start' not in x, fid):\r\n if 'Appres' in l:\r\n print('Apperent Resisitivity column was found')\r\n AppRes = True\r\n elif 'FStcks' in l:\r\n print('Data file in Frequency Domain')\r\n FreDom = True\r\n elif 'TStcks' in l:\r\n print('Data file in Time Domain')\r\n FreDom = False\r\n for l in itertools.takewhile(lambda x: 'elec_end' not in x, itertools.islice(fid, 1, None)):\r\n ElecAllColumns.append(re.findall(reex, l))\r\n for l in itertools.takewhile(lambda x: 'data_end' not in x, itertools.islice(itertools.dropwhile(lambda x: 'data_start' not in x, fid), 3, None)):\r\n if 'TX Resist. out of range' in l:\r\n print('!!! TX Resist. 
out of range, data num: ', re.findall(reex, l))\r\n else:\r\n DataAllColumns.append(re.findall(reex, l))\r\n # data\r\n ap = int(AppRes)\r\n fd = int(FreDom)\r\n num_meas, a, b, m, n, r, v, stk, ctc, day, time = 0, 2, 4, 6, 8, 9 + ap, 11 + ap + fd * 2, 12 + ap + fd * 2, 15 + ap + fd * 5, -6, -5\r\n ip = 10 + ap # define here for structure consistency, then set data[:, ip] = 0\r\n datanp = np.array(DataAllColumns, dtype=np.float)[:, [num_meas, a, b, m, n, r, ip, v, ctc, stk, day, time]]\r\n datadf = pd.DataFrame(datanp)\r\n data_headers = ['meas', 'a', 'b', 'm', 'n', 'r', 'ip', 'v', 'ctc', 'stk', 'day', 'time']\r\n datadf.rename(columns=dict(zip(range(len(data_headers)), data_headers)), inplace=True)\r\n datadf['stk'] = abs(datadf['stk'] / datadf['v']) * 100\r\n # electrodes\r\n num_elec, x, y, z = 1, 2, 3, 4\r\n elecnp = np.array(ElecAllColumns, dtype=np.float)[:, [num_elec, x, y, z]]\r\n elecdf = pd.DataFrame(elecnp)\r\n elec_headers = ['num', 'x', 'y', 'z']\r\n elecdf.rename(columns=dict(zip(range(len(elec_headers)), elec_headers)), inplace=True)\r\n\r\n if not FreDom:\r\n datadf['ip'] = None\r\n\r\n return(elecdf, datadf)\r\n\r\n\r\ndef read_bert(k_file=None):\r\n\r\n with open(k_file) as fid:\r\n lines = fid.readlines()\r\n elec_num = int(lines[0])\r\n data_num = int(lines[elec_num + 2])\r\n\r\n elec_raw = pd.read_csv(k_file, delim_whitespace=True, skiprows=1, nrows=elec_num, header=None)\r\n elec = elec_raw[elec_raw.columns[:-1]]\r\n elec.columns = elec_raw.columns[1:]\r\n data_raw = pd.read_csv(k_file, delim_whitespace=True, skiprows=elec_num + 3, nrows=data_num)\r\n data = data_raw[data_raw.columns[:-1]]\r\n data.columns = data_raw.columns[1:]\r\n return(elec, data)\r\n\r\n\r\ndef fun_rec(a: np.ndarray, b: np.ndarray, m: np.ndarray, n: np.ndarray, x: np.ndarray):\r\n l = int(len(x))\r\n rec_num = np.zeros_like(x)\r\n rec_avg = np.zeros_like(x)\r\n rec_err = np.zeros_like(x)\r\n rec_fnd = np.zeros_like(x)\r\n for i in range(l):\r\n if rec_num[i] != 0:\r\n continue\r\n for j in range(i + 1, l):\r\n if (a[i] == m[j] and b[i] == n[j] and m[i] == a[j] and n[i] == b[j]):\r\n avg = (x[i] + x[j]) / 2\r\n err = abs(x[i] - x[j]) / abs(avg) * 100\r\n rec_num[i] = j + 1\r\n rec_num[j] = i + 1\r\n rec_avg[i] = avg\r\n rec_avg[j] = avg\r\n rec_err[i] = err\r\n rec_err[j] = err\r\n rec_fnd[i] = 1 # mark the meas with reciprocals, else leave 0\r\n rec_fnd[j] = 2 # distinguish between directs and reciprocals\r\n break\r\n return(rec_num, rec_avg, rec_err, rec_fnd)\r\n\r\nif numba_opt:\r\n fun_rec = jit(signature_or_function='UniTuple(float64[:],4)(int32[:],int32[:],int32[:],int32[:],float64[:])',\r\n nopython=True, parallel=False, cache=True, fastmath=True, nogil=True)(fun_rec)\r\n\r\n\r\nclass ERTdataset():\r\n \"\"\" A dataset class composed of two dataframes data and elec.\r\n The class does rely on delegation for many functionalities, useful functionalities are:\r\n * pandas.combine_first : to set data from another dataframe\r\n * to shift elec nums or meas num, just act on the dataframe specific columns\r\n * pandas.rename\r\n * dataframe.to_numpy : to ease and speed calculations (especially row-wise)\r\n \"\"\"\r\n\r\n def __init__(self, data=None, elec=None,\r\n data_headers=['meas', 'a', 'b', 'm', 'n',\r\n 'r', 'k', 'rhoa', 'ip', 'v', 'ctc', 'stk', 'day', 'time',\r\n 'rec_num', 'rec_fnd', 'rec_avg', 'rec_err', 'rec_ip_avg', 'rec_ip_err',\r\n 'rec_valid', 'k_valid', 'rhoa_valid', 'v_valid', 'ctc_valid', 'stk_valid', 'valid'],\r\n data_dtypes={'meas': 'Int16', 'a': 'Int16', 'b': 'Int16', 'm': 
'Int16', 'n': 'Int16',\r\n 'r': float, 'k': float, 'rhoa': float, 'ip': float,\r\n 'v': float, 'ctc': float, 'stk': float, 'day': 'Int64', 'time': 'Int64',\r\n 'rec_num': 'Int16', 'rec_fnd': bool,\r\n 'rec_avg': float, 'rec_err': float,\r\n 'rec_ip_avg': float, 'rec_ip_err': float,\r\n 'rec_valid': bool, 'k_valid': bool, 'rhoa_valid': bool, 'v_valid': bool,\r\n 'ctc_valid': bool, 'stk_valid': bool, 'valid': bool},\r\n elec_headers=['num', 'x', 'y', 'z'],\r\n elec_dtypes={'num': 'Int16', 'x': float, 'y': float, 'z': float}\r\n ):\r\n\r\n self.data = None\r\n self.data_headers = data_headers\r\n self.data_dtypes = data_dtypes\r\n self.elec = None\r\n self.elec_headers = elec_headers\r\n self.elec_dtypes = elec_dtypes\r\n\r\n if data is not None:\r\n self.init_EmptyData(data_len=len(data))\r\n self.data.update(data)\r\n self.data = self.data.astype(self.data_dtypes)\r\n\r\n if elec is not None:\r\n self.init_EmptyElec(elec_len=len(elec))\r\n self.elec.update(elec)\r\n self.elec = self.elec.astype(self.elec_dtypes)\r\n\r\n def init_EmptyData(self, data_len=None):\r\n \"\"\" wrapper to create empty (None) data dataframe with the proper headers and datatypes.\"\"\"\r\n self.data = pd.DataFrame(None, index=range(data_len), columns=range(len(self.data_headers)))\r\n self.data.rename(columns=dict(zip(range(len(self.data_headers)), self.data_headers)), inplace=True)\r\n self.data = self.data.astype(self.data_dtypes)\r\n\r\n def init_EmptyElec(self, elec_len=None):\r\n \"\"\" wrapper to create empty (None) data dataframe with the proper headers and datatypes.\"\"\"\r\n self.elec = pd.DataFrame(None, index=range(elec_len), columns=range(len(self.elec_headers)))\r\n self.elec.rename(columns=dict(zip(range(len(self.elec_headers)), self.elec_headers)), inplace=True)\r\n self.elec = self.elec.astype(self.elec_dtypes)\r\n\r\n def process_rec(self, fun_rec=fun_rec, x='r', x_avg='rec_avg', x_err='rec_err'):\r\n a = self.data['a'].to_numpy(dtype=int)\r\n b = self.data['b'].to_numpy(dtype=int)\r\n m = self.data['m'].to_numpy(dtype=int)\r\n n = self.data['n'].to_numpy(dtype=int)\r\n x = self.data[x].to_numpy(dtype=float)\r\n rec_num, rec_avg, rec_err, rec_fnd = fun_rec(a, b, m, n, x)\r\n self.data['rec_num'] = rec_num\r\n self.data['rec_fnd'] = rec_fnd\r\n self.data[x_avg] = rec_avg\r\n self.data[x_err] = rec_err\r\n\r\n def get_k(self, data_k):\r\n if len(self.data) == len(data_k):\r\n self.data['k'] = data_k['k']\r\n elif len(data_k) < len(self.data):\r\n raise IndexError('len k < len data, make sure the right k file is used')\r\n elif len(self.data) < len(data_k):\r\n warnings.warn('len k != len data; make sure the right k file is used', category=UserWarning)\r\n # data_k_dtype = {key: self.data_dtypes[key] for key in data_k.columns if key in ['a', 'b', 'm', 'n', 'k']}\r\n # right = data_k[['a', 'b', 'm', 'n', 'k']].astype(data_k_dtype)\r\n self.data = self.data.merge(data_k[['a', 'b', 'm', 'n', 'k']], on=['a', 'b', 'm', 'n'], how='left', suffixes=('', '_k'), copy=False)\r\n self.data['k'] = self.data['k_k']\r\n self.data.drop(columns='k_k', inplace=True)\r\n\r\n def couple_rec(self, couple=False, keep_unpaired=False, dir_mark=1, rec_mark=2, unpaired_mark=0):\r\n if (not couple and keep_unpaired): # i.e. 
keep all, as it is\r\n return()\r\n groupby_df = self.data.groupby(self.data['rec_fnd'])\r\n if (couple and keep_unpaired):\r\n self.data = pd.concat([groupby_df.get_group(dir_mark), groupby_df.get_group(unpaired_mark)])\r\n elif(couple and not keep_unpaired):\r\n self.data = groupby_df.get_group(dir_mark)\r\n\r\n def to_bert(self, fname, wrt_rhoa, wrt_ip, data_columns, elec_columns):\r\n print(data_columns)\r\n if wrt_rhoa:\r\n data_columns.append('rhoa')\r\n if wrt_ip:\r\n data_columns.append('ip')\r\n with open(fname, 'a') as file_handle:\r\n file_handle.write(str(len(self.elec)))\r\n file_handle.write('\\n')\r\n file_handle.write('# ' + ' '.join(elec_columns) + '\\n')\r\n self.elec[elec_columns].to_csv(file_handle, sep=' ', index=None, line_terminator='\\n', header=False)\r\n file_handle.write(str(len(self.data[self.data.valid == 1])))\r\n file_handle.write('\\n')\r\n file_handle.write('# ' + ' '.join(data_columns) + '\\n')\r\n self.data[self.data.valid == 1][data_columns].to_csv(file_handle, sep=' ', index=None, line_terminator='\\n', header=False)\r\n\r\n def plot(self, fname, plot_columns, valid_column='valid'):\r\n colors_validity = {1: 'b', 0: 'r'}\r\n labels_validity = {1: 'Valid', 0: 'Invalid'}\r\n groupby_df = self.data.groupby(self.data['valid'])\r\n for key in groupby_df.groups.keys(): # for group 1 (valid) and group 0 (invalid)\r\n meas = groupby_df.get_group(key)['meas'].to_numpy(dtype=int)\r\n for c in plot_columns:\r\n fig_name = fname + labels_validity[key] + '_' + c + '.png'\r\n plt.plot(meas, groupby_df.get_group(key)[c].to_numpy(), 'o', color=colors_validity[key], markersize=4)\r\n plt.ylabel(c)\r\n plt.xlabel('measurement num')\r\n plt.tight_layout()\r\n plt.savefig(fig_name)\r\n plt.close()\r\n\r\n def report(self, report_columns=['ctc_valid', 'stk_valid', 'v_valid', 'rec_valid', 'k_valid', 'rhoa_valid', 'valid']):\r\n for c in report_columns:\r\n print('-----\\n', self.data[c].value_counts())\r\n\r\n\r\ndef get_options(args_dict: dict):\r\n \"\"\" get options from command-l, update with function args_dict if needed, then check consistency\r\n \"\"\"\r\n args = get_cmd()\r\n for key, val in args_dict.items():\r\n if not hasattr(args, key):\r\n raise AttributeError('unrecognized option: ', key)\r\n else:\r\n setattr(args, key, val)\r\n args = check_cmd(args)\r\n return(args)\r\n\r\n\r\ndef __process__(file, args):\r\n \"\"\" process ERT file \"\"\"\r\n \r\n print('\\n', '-' * 80)\r\n if args.fType == 'labrecque':\r\n elec, data = read_labrecque(file)\r\n # pass to ERTdataset class\r\n ds = ERTdataset(data=data, elec=elec)\r\n # adjust\r\n if args.shift_abmn is not None:\r\n ds.data[['a', 'b', 'm', 'n']] += args.shift_abmn\r\n if args.shift_meas is not None:\r\n ds.data['meas'] += args.shift_meas\r\n if args.shift_elec is not None:\r\n ds.elec['num'] += args.shift_elec\r\n if args.elec_coordinates is not None:\r\n ds.elec = pd.read_csv(args.elec_coordinates, delim_whitespace=True)\r\n # filters\r\n if args.rec:\r\n ds.process_rec()\r\n ds.data['rec_valid'] = ds.data['rec_err'] < args.rec_max\r\n if any(ds.data['ip']):\r\n ds.process_rec(x='ip', x_avg='rec_ip_avg', x_err='rec_ip_err')\r\n if args.ctc:\r\n ds.data['ctc_valid'] = ds.data['ctc'] < args.ctc_max\r\n if args.stk:\r\n ds.data['stk_valid'] = ds.data['stk'] < args.stk_max\r\n if args.volt:\r\n ds.data['v_valid'] = ds.data['v'] > args.v_min\r\n if (args.k or args.rhoa): # get k if either k or rhoa are True\r\n elec_kfile, data_kfile = read_bert(k_file=args.k_file)\r\n ds.get_k(data_kfile)\r\n if args.k:\r\n 
ds.data['k_valid'] = ds.data['k'].abs() < args.k_max\r\n if args.rhoa:\r\n ds.data['rhoa'] = ds.data['r'] * ds.data['k']\r\n ds.data['rhoa_valid'] = ds.data['rhoa'].between(args.rhoa_min, args.rhoa_max)\r\n # combine filters\r\n ds.data['valid'] = ds.data[['rec_valid', 'k_valid', 'rhoa_valid', 'v_valid', 'ctc_valid', 'stk_valid']].all(axis='columns')\r\n output_fname = output_files(file, extension='.csv')\r\n ds.data.to_csv(output_fname) # dump all data\r\n # combine rec for lighter output and inversion\r\n ds.couple_rec(couple=args.rec_couple, keep_unpaired=args.rec_keep_unpaired)\r\n # output\r\n output_fname = output_files(file, extension='.dat')\r\n ds.to_bert(output_fname, wrt_rhoa=args.wrt_rhoa, wrt_ip=args.wrt_ip, data_columns=['a', 'b', 'm', 'n', 'r'], elec_columns=['x', 'y', 'z'])\r\n ds.report()\r\n # plot\r\n if args.plot:\r\n ds.plot(file, plot_columns=['ctc', 'stk', 'v', 'rec_err', 'k', 'rhoa'], valid_column='valid')\r\n\r\n\r\ndef process(**kargs):\r\n \"\"\" process function,\r\n it takes **kargs so that cmd-line arguments can be passed as funciton arguments too \"\"\"\r\n \r\n args = get_options(args_dict=kargs)\r\n \r\n for file in args.fName:\r\n __process__(file, args)\r\n\r\nif __name__ == '__main__':\r\n process()\r\n", "sub_path": "processing/process.py", "file_name": "process.py", "file_ext": "py", "file_size_in_byte": 18567, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 99, "usage_type": "call"}, {"api_name": "itertools.takewhile", "line_number": 115, "usage_type": "call"}, {"api_name": "itertools.takewhile", "line_number": 125, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 125, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 126, "usage_type": "call"}, {"api_name": "itertools.takewhile", "line_number": 127, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 127, "usage_type": "call"}, {"api_name": "itertools.dropwhile", "line_number": 127, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 129, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 145, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 162, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 171, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 176, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 196, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 245, 
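For reference, the reciprocal error that fun_rec in the script above assigns to a direct/reciprocal pair, replayed on two hand-picked resistance readings (illustrative values, not from any real survey):

xi, xj = 102.0, 98.0                 # direct and reciprocal resistances, in ohm
avg = (xi + xj) / 2                  # 100.0, stored in rec_avg for both rows
err = abs(xi - xj) / abs(avg) * 100  # 4.0, i.e. kept by -rec_max 5, dropped by -rec_max 3
print(avg, err)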
"usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 251, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 273, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 313, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 314, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 314, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 315, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 315, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 316, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 354, "usage_type": "call"}]} +{"seq_id": "303362661", "text": "from sympy import *\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#THIS IS ONLY A VISUAL ASPECT OF GRADIENT DESCENT 2D\nx=Symbol('x')\n\n\nfrom scipy.misc import derivative\n#making some functions so the later calculatiosn can be handeld through them\ndef f(x):#inpute function\n return x**2\n\ndef d(x):#deritivate of input\n return derivative(f,x)\n\n\ny=np.linspace(-50,50)\n\n\n\nplt.plot(y,f(y))#plot of the input function\n\na = random.randint(-50,50)\n\nfor i in range(1,10):\n if d(x).subs(x, a) <0:\n m = d(x).subs(x, a)\n else:\n m = -d(x).subs(x, a)\n\n b = f(x).subs(x, a) - d(x).subs(x, a) * a\n print(\"slope\")\n print(m)\n print(b)\n print(a)\n plt.plot(y, m*y+b)\n a=(1/5)*a\n if -m <=.05:\n break\nplt.show()\n", "sub_path": "Gradient.py", "file_name": "Gradient.py", "file_ext": "py", "file_size_in_byte": 766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "scipy.misc.derivative", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "113297279", "text": "from requests import Session\nfrom bs4 import BeautifulSoup as bs\nimport re\nimport os\nimport file\n\nUserName = os.getenv('USERNAME')\nPassword = os.getenv('PASSWORD')\nfingerprint = os.getenv('FINGERPRINT')\nprint('lektiescanning...')\n\n\n\nwith Session() as s:\n site = s.get(\"https://nr-aadal.viggo.dk/Basic/Account/Login\")\n bs_content = bs(site.content, \"html.parser\")\n login_data = {\"UserName\": UserName, \"Password\": Password, \"fingerprint\": fingerprint}\n 
s.post(\"https://nr-aadal.viggo.dk/Basic/Account/Login\", login_data)\n home_page = s.get(\"https://nr-aadal.viggo.dk/Basic/HomeworkAndAssignment\")\n home_page = str(home_page.content).replace('\\\\n', '\\n').replace('\\\\r', '\\r').replace('\\\\xc3\\\\xb8', 'ø').replace('\\\\xc3\\\\xa5', 'å').replace('ø', 'ø').replace('å', 'å').replace('\\\\xc3\\\\xa6', 'æ').replace('\\\\xc3\\\\x98', 'Ø')\n links = re.findall(\"(?<=).*?(?=)\", home_page)\n begivenhed.append(newBegivenhed[0].replace('æ', 'æ'))\n newTidspunkt = re.findall(\"(?<=
).*?(?= <)\", home_page)\r\n        tidspunkt.append(newTidspunkt[0])\r\n        newBeskrivelse = re.findall(\"(?<=).*?(?=)\", home_page)\r\n        linkInPost = ''\r\n        if \"\\\" rel=\\\"noopener noreferrer\\\" target=\\\"_blank\\\">\" in newBeskrivelse[0]:\r\n            linkInPost = re.findall(\"(?<=\\\" rel=\\\"noopener noreferrer\\\" target=\\\"_blank\\\">).*?(?=)\", newBeskrivelse[0])[0]\r\n            doubleLink = linkInPost + linkInPost\r\n        preHexRemoval = newBeskrivelse[0].replace('', '\\n').replace('', '').replace('', '').replace('', '').replace('', '').replace('', '').replace('', '').replace(doubleLink, linkInPost)\r\n        preHexRemoval = preHexRemoval.replace('\\\\x', '|')\r\n        hexToRemove = re.findall(\"(?<=\\|).*?(?= |\\n)\", preHexRemoval)\r\n        for i in range(0, len(hexToRemove)):\r\n            shitToReplaceInForLoop = hexToRemove[i]\r\n            preHexRemoval = preHexRemoval.replace(shitToReplaceInForLoop, '')\r\n        finishedBeskrivelse = preHexRemoval.replace('|', '')\r\n        beskrivelse.append(finishedBeskrivelse)\r\n        newAuthor = re.findall(\"(?<=).*?(?=
)\", home_page)\n author.append(newAuthor[0])\n newFil = re.findall(\"(?<=).*?(?=)\", home_page)\n if len(newFileName) != 0:\n for i in range(0, len(newFileName)):\n newFileName[i] = newFileName[i].replace('æ', 'æ')\n fileNameCollection = str(newFileName).replace('[', '').replace(']', '').replace('\\'', '')\n else:\n fileNameCollection = \"Ingen\"\n fileNames.append(fileNameCollection)\n #for i in range(0, len(begivenhed)):\n # print(\"\\nBegivenhed: \" + begivenhed[i])\n # print(\"Tidspunkt: \" + tidspunkt[i])\n # print(\"Beskrivelse: \" + beskrivelse[i])\n", "sub_path": "lektiescanner.py", "file_name": "lektiescanner.py", "file_ext": "py", "file_size_in_byte": 3978, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.getenv", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 8, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 14, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 21, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 32, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 34, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 36, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 39, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 43, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 49, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 51, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "589305034", "text": "from pydantic import Field, validator\nfrom typing import List\n\nfrom .base import OptionalSectionConfig, SnakeModel\n\n\nclass PeriodicTaskDTO(OptionalSectionConfig):\n liveInterval: int = Field(example=10)\n\n @validator(\"name\")\n def validate_name(cls, value):\n if value != \"metrics\":\n raise ValueError(f\"Not supported periodic task named: {value}\")\n return value\n\n\nclass PeriodicTaskListDTO(SnakeModel):\n periodicTasks: List[PeriodicTaskDTO]\n", "sub_path": "api/models/periodic_task.py", "file_name": "periodic_task.py", "file_ext": "py", "file_size_in_byte": 474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "base.OptionalSectionConfig", "line_number": 7, "usage_type": "name"}, {"api_name": "pydantic.Field", "line_number": 8, "usage_type": "call"}, {"api_name": "pydantic.validator", "line_number": 10, "usage_type": "call"}, {"api_name": "base.SnakeModel", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "155237920", "text": "from pathlib import Path\nimport random\nimport pygame\nimport math\nfrom gobject import GameObject\n\n\n# 敵機類別\nclass Enemy(GameObject):\n\n # 建構式\n def __init__(self, xy=None, playground=None, sensitivity=1):\n GameObject.__init__(self, playground)\n if xy is None:\n self._y = -100\n self._x = random.randint(10, playground[0] - 100)\n else:\n self._x = xy[0] # 座標屬性\n self._y = xy[1] #\n\n # 設定邊界\n self._objectBound = (10, self._playground[0] - 100, -100, self._playground[1]) # 右, 左, 上, 下\n self._moveScale = 0.1 * sensitivity\n if random.random() > 0.5:\n self._slop = 0.5\n else:\n self._slop = -0.5\n self._moveScaleY = math.cos(self._slop * 
math.pi / 2) * self._moveScale\n        self._moveScaleX = math.sin(self._slop * math.pi / 2) * self._moveScale\n\n        __parent_path = Path(__file__).parents[1]\n        self.__enemy_path = __parent_path / 'res' / 'enemy.png'\n        self._image = pygame.image.load(self.__enemy_path)\n        self._center = self._x + self._image.get_rect().w / 2, self._y + self._image.get_rect().h / 2\n        self._radius = 0.3 * math.hypot(self._image.get_rect().w, self._image.get_rect().h)\n\n        self.to_the_bottom()\n\n    def to_the_bottom(self):\n        self._changeY = self._moveScaleY\n        self._changeX = self._moveScaleX\n\n    def update(self):\n        self._x += self._changeX\n        self._y += self._changeY\n\n        if random.random() < 0.001:\n            self._slop = -self._slop\n            self._changeX = math.sin(self._slop * math.pi / 2) * self._moveScale\n        if self._x > self._objectBound[1]:\n            self._x = self._objectBound[1]\n            self._slop = -self._slop\n            self._changeX = math.sin(self._slop * math.pi / 2) * self._moveScale\n        if self._x < self._objectBound[0]:\n            self._x = self._objectBound[0]\n            self._slop = -self._slop\n            self._changeX = math.sin(self._slop * math.pi / 2) * self._moveScale\n        if self._y > self._objectBound[3]:\n            self._y = self._objectBound[3]\n            # beyond the screen range; mark as inactive\n            self._available = False\n        if self._y < self._objectBound[2]:\n            self._y = self._objectBound[2]\n\n        self._center = self._x + self._image.get_rect().w / 2, self._y + self._image.get_rect().h / 2\n", "sub_path": "code/enemy.py", "file_name": "enemy.py", "file_ext": "py", "file_size_in_byte": 2435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "gobject.GameObject", "line_number": 9, "usage_type": "name"}, {"api_name": "gobject.GameObject.__init__", "line_number": 13, "usage_type": "call"}, {"api_name": "gobject.GameObject", "line_number": 13, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 16, "usage_type": "call"}, {"api_name": "random.random", "line_number": 24, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 28, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 28, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 29, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 33, "usage_type": "attribute"}, {"api_name": "math.hypot", "line_number": 35, "usage_type": "call"}, {"api_name": "random.random", "line_number": 47, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 49, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 49, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 53, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 53, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 57, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 57, "usage_type": "attribute"}]} +{"seq_id": "551294703", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/kieffer/workspace/fabio/build/lib.macosx-10.6-intel-3.5/fabio/test/testfabioimage.py\n# Compiled at: 2020-04-03 09:02:03\n# Size of source mod 2**32: 8984 bytes\n\"\"\"\nTest cases for the fabioimage class\n\ntestsuite by Jerome Kieffer (Jerome.Kieffer@esrf.eu)\n28/11/2014\n\"\"\"\nfrom __future__ import 
print_function, with_statement, division, absolute_import\nimport unittest, sys, os, numpy, copy, logging\nlogger = logging.getLogger(__name__)\nfrom ..fabioimage import FabioImage\nfrom .. import fabioutils\nfrom ..utils import pilutils\nfrom .utilstest import UtilsTest\ntry:\n import pathlib\nexcept ImportError:\n try:\n import pathlib2 as pathlib\n except ImportError:\n pathlib = None\n\nclass Test50000(unittest.TestCase):\n __doc__ = ' test with 50000 everywhere'\n\n def setUp(self):\n \"\"\"make the image\"\"\"\n dat = numpy.ones((1024, 1024), numpy.uint16)\n dat = (dat * 50000).astype(numpy.uint16)\n assert dat.dtype.char == numpy.ones(1, numpy.uint16).dtype.char\n hed = {'Title': '50000 everywhere'}\n self.obj = FabioImage(dat, hed)\n\n def tearDown(self):\n unittest.TestCase.tearDown(self)\n self.obj = None\n\n def testgetmax(self):\n \"\"\"check max\"\"\"\n self.assertEqual(self.obj.getmax(), 50000)\n\n def testgetmin(self):\n \"\"\"check min\"\"\"\n self.assertEqual(self.obj.getmin(), 50000)\n\n def testgetmean(self):\n \"\"\"check mean\"\"\"\n self.assertEqual(self.obj.getmean(), 50000)\n\n def testgetstddev(self):\n \"\"\"check stddev\"\"\"\n self.assertEqual(self.obj.getstddev(), 0)\n\n def testcopy(self):\n \"\"\"test the copy statement\"\"\"\n c = copy.copy(self.obj)\n self.assertNotEqual(id(c), id(self.obj), 'object differ')\n self.assertEqual(c.header, self.obj.header, 'header are the same')\n self.assertEqual(abs(c.data - self.obj.data).max(), 0, 'data are the same')\n self.assertEqual(c.filename, self.obj.filename, 'filename is the same')\n\n\nclass TestSlices(unittest.TestCase):\n __doc__ = 'check slicing'\n\n def setUp(self):\n \"\"\"make test data\"\"\"\n dat2 = numpy.zeros((1024, 1024), numpy.uint16)\n hed = {'Title': 'zeros and 100'}\n self.cord = [256, 256, 790, 768]\n self.obj = FabioImage(dat2, hed)\n self.slic = slic = self.obj.make_slice(self.cord)\n dat2[slic] = dat2[slic] + 100\n assert self.obj.maxval is None\n assert self.obj.minval is None\n self.npix = (slic[0].stop - slic[0].start) * (slic[1].stop - slic[1].start)\n\n def testgetmax(self):\n \"\"\"check max\"\"\"\n self.assertEqual(self.obj.getmax(), 100)\n\n def testgetmin(self):\n \"\"\"check min\"\"\"\n self.assertEqual(self.obj.getmin(), 0)\n\n def testintegratearea(self):\n \"\"\" check integrations\"\"\"\n self.obj.resetvals()\n area1 = self.obj.integrate_area(self.cord)\n self.obj.resetvals()\n area2 = self.obj.integrate_area(self.slic)\n self.assertEqual(area1, area2)\n self.assertEqual(area1, self.npix * 100)\n\n def testRebin(self):\n \"\"\"Test the rebin method\"\"\"\n big = numpy.arange(64).reshape((8, 8))\n res = numpy.array([[13, 17], [45, 49]])\n fabimg = FabioImage(data=big, header={})\n fabimg.rebin(4, 4)\n self.assertEqual(abs(res - fabimg.data).max(), 0, 'data are the same after rebin')\n\n\nclass TestOpen(unittest.TestCase):\n __doc__ = 'check opening compressed files'\n testfile = os.path.join(UtilsTest.tempdir, 'testfile')\n\n def setUp(self):\n \"\"\" create test files\"\"\"\n if not os.path.isfile(self.testfile):\n with open(self.testfile, 'wb') as (f):\n f.write(b'{ hello }')\n if not os.path.isfile(self.testfile + '.gz'):\n with fabioutils.GzipFile(self.testfile + '.gz', 'wb') as (wf):\n wf.write(b'{ hello }')\n if not os.path.isfile(self.testfile + '.bz2'):\n with fabioutils.BZ2File(self.testfile + '.bz2', 'wb') as (wf):\n wf.write(b'{ hello }')\n self.obj = FabioImage()\n\n def testFlat(self):\n \"\"\" no compression\"\"\"\n res = self.obj._open(self.testfile)\n 
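The TestOpen fixtures above write one payload three times: flat, gzip-compressed and bz2-compressed, so that _open can be exercised against each encoding through the same read path. A minimal standalone sketch of that fixture pattern, using only the standard library rather than fabioutils (the file name is illustrative):

import gzip, bz2

payload = b'{ hello }'
with open('testfile', 'wb') as f:
    f.write(payload)                    # flat copy
with gzip.open('testfile.gz', 'wb') as f:
    f.write(payload)                    # gzip copy
with bz2.open('testfile.bz2', 'wb') as f:
    f.write(payload)                    # bz2 copy

# Every opener returns a file-like object, so the reading code is identical.
with gzip.open('testfile.gz', 'rb') as f:
    assert f.read() == payload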
self.assertEqual(res.read(), b'{ hello }')\n res.close()\n\n def testgz(self):\n \"\"\" gzipped \"\"\"\n res = self.obj._open(self.testfile + '.gz')\n self.assertEqual(res.read(), b'{ hello }')\n res.close()\n\n def testbz2(self):\n \"\"\" bzipped\"\"\"\n res = self.obj._open(self.testfile + '.bz2')\n self.assertEqual(res.read(), b'{ hello }')\n res.close()\n\n def test_badtype(self):\n self.assertRaises(TypeError, self.obj._open, None)\n\n def test_pathlib(self):\n if pathlib is None:\n self.skipTest('pathlib is not available')\n path = pathlib.PurePath(self.testfile + '.bz2')\n res = self.obj._open(path)\n self.assertIsNotNone(res)\n res.close()\n\n\nclass TestPilImage(unittest.TestCase):\n __doc__ = ' check PIL creation'\n\n def setUp(self):\n if pilutils.Image is None:\n self.skipTest('PIL is not available')\n self.okformats = [\n numpy.uint8,\n numpy.int8,\n numpy.uint16,\n numpy.int16,\n numpy.uint32,\n numpy.int32,\n numpy.float32]\n\n def mkdata(self, shape, typ):\n \"\"\" generate [01] testdata \"\"\"\n return numpy.random.random(shape).astype(typ)\n\n def testpil(self):\n for typ in self.okformats:\n for shape in [(10, 20), (431, 1325)]:\n testdata = self.mkdata(shape, typ)\n img = FabioImage(testdata, {'title': 'Random data'})\n pim = img.toPIL16()\n for i in [0, 5, 6, shape[1] - 1]:\n for j in [0, 5, 7, shape[0] - 1]:\n errstr = str(typ) + ' %d %d %f %f t=%s' % (\n i, j, testdata[(j, i)], pim.getpixel((i, j)), typ)\n er1 = img.data[(j, i)] - pim.getpixel((i, j))\n er2 = img.data[(j, i)] + pim.getpixel((i, j))\n if er2 != 0.0:\n err = er1 / er2\n else:\n err = er1\n self.assertAlmostEqual(err, 0, 6, errstr)\n\n\nclass TestPilImage2(TestPilImage):\n __doc__ = ' check with different numbers'\n\n def mkdata(self, shape, typ):\n \"\"\" positive and big\"\"\"\n return (numpy.random.random(shape) * sys.maxsize / 10).astype(typ)\n\n\nclass TestPilImage3(TestPilImage):\n __doc__ = ' check with different numbers'\n\n def mkdata(self, shape, typ):\n \"\"\" positive, negative and big\"\"\"\n return ((numpy.random.random(shape) - 0.5) * sys.maxsize / 10).astype(typ)\n\n\nclass TestDeprecatedFabioImage(unittest.TestCase):\n\n def test_patch_dim(self):\n data = numpy.array(numpy.arange(30)).reshape(3, 10)\n image = FabioImage(data=data)\n image.dim2, image.dim1 = data.shape\n self.assertEqual(image.shape, data.shape)\n\n def test_cleanup_pilimage_cache(self):\n data = numpy.array(numpy.arange(30)).reshape(3, 10)\n image = FabioImage(data=data)\n image.pilimage = None\n\n\ndef suite():\n loadTests = unittest.defaultTestLoader.loadTestsFromTestCase\n testsuite = unittest.TestSuite()\n testsuite.addTest(loadTests(Test50000))\n testsuite.addTest(loadTests(TestSlices))\n testsuite.addTest(loadTests(TestOpen))\n testsuite.addTest(loadTests(TestPilImage))\n testsuite.addTest(loadTests(TestPilImage2))\n testsuite.addTest(loadTests(TestPilImage3))\n testsuite.addTest(loadTests(TestDeprecatedFabioImage))\n return testsuite\n\n\nif __name__ == '__main__':\n runner = unittest.TextTestRunner()\n runner.run(suite())", "sub_path": "pycfiles/fabio-0.10.0-cp35-cp35m-macosx_10_6_intel/testfabioimage.cpython-35.py", "file_name": "testfabioimage.cpython-35.py", "file_ext": "py", "file_size_in_byte": 7949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 34, 
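The rebin expectation in TestSlices above is plain per-block averaging: each 4x4 tile of arange(64).reshape(8, 8) averages to 13.5, 17.5, 45.5 and 49.5, which truncate to the integers in res. A small numpy sketch reproducing the check independently of fabio:

import numpy

big = numpy.arange(64).reshape(8, 8)
# Split into a 2x2 grid of 4x4 blocks, average each block, truncate to int.
binned = big.reshape(2, 4, 2, 4).mean(axis=(1, 3)).astype(int)
assert (binned == numpy.array([[13, 17], [45, 49]])).all()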
"usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 36, "usage_type": "attribute"}, {"api_name": "fabioimage.FabioImage", "line_number": 38, "usage_type": "call"}, {"api_name": "unittest.TestCase.tearDown", "line_number": 41, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 41, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 62, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 74, "usage_type": "attribute"}, {"api_name": "fabioimage.FabioImage", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 104, "usage_type": "call"}, {"api_name": "fabioimage.FabioImage", "line_number": 105, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "utilstest.UtilsTest.tempdir", "line_number": 112, "usage_type": "attribute"}, {"api_name": "utilstest.UtilsTest", "line_number": 112, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "fabioimage.FabioImage", "line_number": 125, "usage_type": "call"}, {"api_name": "pathlib2.PurePath", "line_number": 151, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 157, "usage_type": "attribute"}, {"api_name": "utils.pilutils.Image", "line_number": 161, "usage_type": "attribute"}, {"api_name": "utils.pilutils", "line_number": 161, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 164, "usage_type": "attribute"}, {"api_name": "numpy.int8", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.int16", "line_number": 167, "usage_type": "attribute"}, {"api_name": "numpy.uint32", "line_number": 168, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 169, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 174, "usage_type": "attribute"}, {"api_name": "fabioimage.FabioImage", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 200, "usage_type": "attribute"}, {"api_name": "sys.maxsize", "line_number": 200, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 208, "usage_type": 
"attribute"}, {"api_name": "sys.maxsize", "line_number": 208, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 214, "usage_type": "call"}, {"api_name": "fabioimage.FabioImage", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 220, "usage_type": "call"}, {"api_name": "fabioimage.FabioImage", "line_number": 221, "usage_type": "call"}, {"api_name": "unittest.defaultTestLoader", "line_number": 226, "usage_type": "attribute"}, {"api_name": "unittest.TestSuite", "line_number": 227, "usage_type": "call"}, {"api_name": "unittest.TextTestRunner", "line_number": 239, "usage_type": "call"}]} +{"seq_id": "457946847", "text": "import json\nimport logging\n\n# Used to compute heartbeat. Importing these slows down load time noticeably.\nimport scipy\nfrom scipy import signal\n\nfrom Patterns.PatternBase import PatternBase\nfrom ProgramModules.Timers import Timer\n\nlogger = logging.getLogger(__name__)\n\n\nclass LED(object):\n\t\"\"\"A representation of an LED to capture position and light values.\"\"\"\n\n\tdef __init__(self, color_triple=None):\n\t\t\"\"\"Initialize.\n\n\t\tArgs:\n\t\t color_triple: A tuple of 3 color values for RGB.\n\t\t\"\"\"\n\t\tif not color_triple:\n\t\t\tself.color_triple = (0, 0, 0)\n\t\telse:\n\t\t\tself.color_triple = color_triple\n\t\tself._brightness = 0\n\n\t@property\n\tdef color_triple(self):\n\t\treturn self._color_triple\n\n\t@color_triple.setter\n\tdef color_triple(self, color_triple):\n\t\tif color_triple[0] < 0 or color_triple[0] > 255:\n\t\t\tmsg = ('color_triple[0] must be between 0 and 255, '\n\t\t\t 'was %s instead' % color_triple[0])\n\t\t\traise ValueError(msg)\n\t\tif color_triple[1] < 0 or color_triple[1] > 255:\n\t\t\tmsg = ('color_triple[1] must be between 0 and 255, '\n\t\t\t 'was %s instead' % color_triple[1])\n\t\t\traise ValueError(msg)\n\t\tif color_triple[2] < 0 or color_triple[2] > 255:\n\t\t\tmsg = ('color_triple[2] must be between 0 and 255, '\n\t\t\t 'was %s instead' % color_triple[2])\n\t\t\traise ValueError(msg)\n\t\tself._color_triple = color_triple\n\n\tdef getColorWithBrightness(self):\n\t\t\"\"\"Get the color value with applied brightness as a hex string.\n\n\t\tFor example, if the color value would be (255,255,0), the hex\n\t\tstring would be \"#FFFF00\".\n\t\t\"\"\"\n\t\tbrightness_triple = list(self.color_triple)\n\t\tbrightness_triple[0] = int(brightness_triple[0]*self.brightness)\n\t\tbrightness_triple[1] = int(brightness_triple[1]*self.brightness)\n\t\tbrightness_triple[2] = int(brightness_triple[2]*self.brightness)\n\t\treturn tuple(brightness_triple)\n\n\t@property\n\tdef brightness(self):\n\t\treturn self._brightness\n\n\t@brightness.setter\n\tdef brightness(self, brightness):\n\t\t\"\"\"Set brightness as a value 0 <= x <= 1.\"\"\"\n\t\tif brightness < 0:\n\t\t\traise ValueError(\"'brigthness must not be smaller than 0'\")\n\t\telif brightness > 1:\n\t\t\traise ValueError(\"'brigthness must not be larger than 1'\")\n\t\tself._brightness = brightness\n\n\nclass LEDGrid(object):\n\t\"\"\"A two-dimensional grid of LEDs.\"\"\"\n\n\tdef __init__(self, row_count, col_count, default_color_triple=None):\n\t\t\"\"\"Initialize.\n\n\t\tArgs:\n\t\t row_count: The number of rows.\n\t\t col_count: The number of columns.\n\t\t\"\"\"\n\t\tself._row_count = 
row_count\n\t\tself._col_count = col_count\n\t\tif not default_color_triple:\n\t\t\tdefault_color_triple = (0, 0, 0)\n\t\tself._leds = []\n\t\tself._leds = [[LED(default_color_triple)\n\t\t\t       for col in range(col_count)]\n\t\t\t      for row in range(row_count)]\n\n\t@property\n\tdef row_count(self):\n\t\treturn self._row_count\n\n\t@property\n\tdef col_count(self):\n\t\treturn self._col_count\n\n\tdef getLED(self, row, col):\n\t\t\"\"\"Returns the LED at (row,column).\"\"\"\n\t\treturn self._leds[row][col]\n\n\t@staticmethod\n\tdef getDistance(left, right):\n\t\t\"\"\"Computes the distance of one LED to another LED.\n\n\t\tArgs:\n\t\t  left: First LED position (row, col).\n\t\t  right: Second LED position (row, col).\n\n\t\tReturns:\n\t\t  A (row distance, column distance) tuple.\n\t\t\"\"\"\n\t\tcol_distance = 0\n\t\trow_distance = 0\n\t\tif left[1] > right[1]:\n\t\t\tcol_distance = left[1] - right[1]\n\t\telse:\n\t\t\tcol_distance = right[1] - left[1]\n\t\tif left[0] > right[0]:\n\t\t\trow_distance = left[0] - right[0]\n\t\telse:\n\t\t\trow_distance = right[0] - left[0]\n\t\treturn (row_distance, col_distance)\n\n\n\nclass HeartBeat(PatternBase):\n\t\"\"\"An LED pattern emulating a heart beat.\"\"\"\n\n\t# Difference in brightness per step (either increasing or decreasing).\n\tBRIGHTNESS_DIFF = 0.1\n\n\t# Differences in brightness in relation to the distance to the point\n\t# (the further away, the darker).\n\tDISTANCE_DIFF = 0.05\n\n\tdef __init__(self, *args):\n\t\t# Passed in via SculptureModuleBase.addPattern().\n\t\tgrid_size = args[0]\n\t\tself.row_count = grid_size[0]\n\t\tself.col_count = grid_size[1]\n\t\tlogger.info('HeartBeat Pattern initialized (size=[%s,%s])',\n\t\t\t    self.row_count, self.col_count)\n\t\t# A dictionary of input parameters rendered in the UI. 
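The LED/LEDGrid pair above keeps the base colour and the brightness separate until render time: getColorWithBrightness just scales each channel, and getDistance returns per-axis absolute offsets. A short usage sketch of the classes exactly as defined here:

grid = LEDGrid(row_count=2, col_count=3, default_color_triple=(255, 0, 0))
led = grid.getLED(0, 1)
led.brightness = 0.5
print(led.getColorWithBrightness())         # (127, 0, 0)
print(LEDGrid.getDistance((0, 0), (1, 2)))  # (1, 2)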
Note\n\t\t# that those are used by PatternBase.__init__() to construct\n\t\t# self.inputs, an InputManager.InputCollection object.\n\t\t#\n\t\t# 'type' and 'subtype' are used to create the input name as\n\t\t# defined in Inputs.Basic.inputTypes.\n\t\tself.inputParams = {\n\t\t\t'PatternMultiVal' : {\n\t\t\t\t'descriptionInPattern' : 'Parameters',\n\t\t\t\t'type' : 'multi',\n\t\t\t\t'subType' : 'basic',\n\t\t\t\t'number' : 3,\n\t\t\t\t'basicInputType' : 'int value',\n\t\t\t\t'min' : [1, 1],\n\t\t\t\t'max' : [self.col_count - 1, 100],\n\t\t\t\t'default' : [self.col_count / 2, 70],\n\t\t\t\t'description' : ['heart position',\n\t\t\t\t\t\t 'maximum brightness',\n\t\t\t\t\t],\n\t\t\t\t'channels' : ['heart_pos',\n\t\t\t\t\t      'max_brightness',\n\t\t\t\t],\n\t\t\t},\n\t\t\t'ColorMultiVal' : {\n\t\t\t\t'descriptionInPattern' : 'Parameters',\n\t\t\t\t'type' : 'multi',\n\t\t\t\t'subType' : 'basic',\n\t\t\t\t'number' : 3,\n\t\t\t\t'basicInputType' : 'int value',\n\t\t\t\t'min' : [0, 0, 0],\n\t\t\t\t'max' : [255, 255, 255],\n\t\t\t\t# red is a good default for a heartbeat.\n\t\t\t\t'default' : [255, 0, 0],\n\t\t\t\t'description' : ['red color additive',\n\t\t\t\t\t\t 'green color additive',\n\t\t\t\t\t\t 'blue color additive',\n\t\t\t\t],\n\t\t\t\t'channels' : ['red',\n\t\t\t\t\t      'green',\n\t\t\t\t\t      'blue',\n\t\t\t\t],\n\t\t\t},\n\t\t\t'triggerStep' : {\n\t\t\t\t'descriptionInPattern' :\n\t\t\t\t'Interval between refreshes',\n\t\t\t\t'type' : 'pulse',\n\t\t\t\t'subType' : 'timer',\n\t\t\t\t'bindToFunction' : 'triggerStep',\n\t\t\t\t'min' : 1 ,\n\t\t\t\t'max' : 100,\n\t\t\t\t'default' : 1,\n\t\t\t},\n\t\t\t'triggerSequence' : {\n\t\t\t\t'descriptionInPattern' : 'Activate',\n\t\t\t\t'type' : 'pulse',\n\t\t\t\t'subType' : 'onOff',\n\t\t\t\t'bindToFunction' : 'triggerSequence',\n\t\t\t},\n\t\t\t# Name must match channel defined above in\n\t\t\t# 'multiVal'. These inputs do not have corresponding\n\t\t\t# function bindings, but we update the matching\n\t\t\t# internal values on triggerStep(), thus having a\n\t\t\t# near-immediate effect.\n\t\t\t#\n\t\t\t# Note that this is a one-dimensional value - we\n\t\t\t# simplify and assume the LED grid only has 1 row and\n\t\t\t# 'heart_pos' identifies the column position of the\n\t\t\t# heart.\n\t\t\t'heart_pos' : {\n\t\t\t\t'descriptionInPattern' :\n\t\t\t\t'Position of the heart.',\n\t\t\t\t'type' : 'value',\n\t\t\t\t'bindToFunction' : '_update_heart_position',\n\t\t\t},\n\t\t\t# Name must match channel defined above in 'multiVal'.\n\t\t\t'max_brightness' : {\n\t\t\t\t'descriptionInPattern' :\n\t\t\t\t'The maximum brightness of the heart LED.',\n\t\t\t\t'type' : 'value',\n\t\t\t},\n\t\t\t'red' : {\n\t\t\t\t'descriptionInPattern' : 'Red value of color.',\n\t\t\t\t'type' : 'value',\n\t\t\t},\n\t\t\t'green' : {\n\t\t\t\t'descriptionInPattern' : 'Green value of color.',\n\t\t\t\t'type' : 'value',\n\t\t\t},\n\t\t\t'blue' : {\n\t\t\t\t'descriptionInPattern' : 'Blue value of color.',\n\t\t\t\t'type' : 'value',\n\t\t\t},\n\t\t}\n\t\tPatternBase.__init__(self, *args)\n\t\tself.sequenceTriggered = False\n\n\t\tself._red = self.inputs.red\n\t\tself._green = self.inputs.green\n\t\tself._blue = self.inputs.blue\n\n\t\tself._led_grid = LEDGrid(self.row_count, self.col_count,\n\t\t\t\t\t (self._red, self._green, self._blue))\n\t\tself._heart_row = -1\n\t\tself._heart_col = -1\n\t\tself._heart_led = None\n\t\tself._max_distance = -1\n\t\t# Note that this sets the self._heart_... 
attributes.\n\t\tself._update_heart_position(0, self.inputs.heart_pos)\n\t\tself._max_distance = (0, 0)\n\t\tself._max_brightness = self.inputs.max_brightness\n\t\tself.increase = True\n\n\t\t# A list of precomputed heart brightness values we will read\n\t\t# from on update.\n\t\tself._led_values = self._compute_heart_values()\n\t\t# Determine the last index once, so we don't have to do it for\n\t\t# every update.\n\t\tself._led_values_last_index = len(self._led_values) - 1\n\t\t# Index of the currently used heart brightness value.\n\t\tself._led_values_index = 0\n\n\tdef _compute_heart_values(self):\n\t\t\"\"\"Precompute heart brightness values.\n\n\t\tThe returned list reflects brightness values emulating a\n\t\tbeating heart. A visualization can iterate over the values\n\t\trepeatedly for a good heart beat pattern.\n\n\t\tWe precompute them because the operation is expensive.\n\n\t\tReturns:\n\t\t  A list of precomputed heart beat values.\n\t\t\"\"\"\n\n\t\tvalues = []\n\t\t# No variability as we are going to loop anyway.\n\t\trate_variability = [1.0, 1.0]\n\t\t# The higher the sample rate, the more values we get. The\n\t\t# current value was determined by trial-and-error.\n\t\tsample_rate = 50.0\n\t\t# To be honest, no idea what this means or how it affects the values.\n\t\tdaub_array = signal.wavelets.daub(10)\n\t\tecg_data_points = []\n\t\tfor r in rate_variability:\n\t\t\tecg_data_points.append(signal.resample(\n\t\t\t\tdaub_array, int(r*sample_rate)))\n\t\tvalues = scipy.concatenate(ecg_data_points)\n\t\t# Equalize data to fit into our expected brightness spectrum.\n\t\tminimum = min(values)\n\t\tmaximum = max(values)\n\t\t# We multiply by 2 to end up with a value greater than 1, but\n\t\t# smaller than 2 (heuristically determined, no smart reason\n\t\t# why).\n\t\tfactor = max(minimum, maximum)*2\n\t\t# The brightness boost is computed based on the factor (a value\n\t\t# between 2 and 1), multiplied by the user-configured maximum\n\t\t# brightness of the heart (which is something between 1 and\n\t\t# 100, so we divide it accordingly.)\n\t\tbrightness_boost = (factor - 1)*(self._max_brightness/100.0)\n\t\t# Now we take the computed values, divide them by the\n\t\t# precomputed factor, which equalizes them somewhat, and add\n\t\t# the computed brightness boost. 
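_compute_heart_values above fakes an ECG pulse from scipy's Daubechies wavelet, resamples it to one beat, and rescales the result into a brightness range. A condensed sketch of the same recipe, mirroring the older scipy.signal.wavelets API this file uses (70 is the pattern's default maximum brightness):

import scipy
from scipy import signal

sample_rate = 50.0
daub_array = signal.wavelets.daub(10)                 # ECG-like pulse shape
beat = signal.resample(daub_array, int(sample_rate))  # stretch to one beat
values = scipy.concatenate([beat, beat])              # two identical beats
factor = max(min(values), max(values)) * 2
boost = (factor - 1) * (70 / 100.0)                   # peak-brightness offset
values = [(v / factor) + boost for v in values]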
The addition allows us to set\n\t\t# the heart to the proper brightness value and aligns all other\n\t\t# values accordingly.\n\t\tvalues = [(value/factor)+brightness_boost for value in values]\n\t\treturn values\n\n\tdef _update_heart_position(self, row, col):\n\t\t\"\"\"Set a new heart position and update related values.\"\"\"\n\t\tlogger.info('HeartBeat._update_heart_position(%s, %s)',\n\t\t\t    row, col)\n\t\tself._heart_row = row\n\t\tself._heart_col = col\n\t\tself._heart_led = self._led_grid.getLED(row, col)\n\t\tself._max_distance = max(\n\t\t\tLEDGrid.getDistance((0, 0), (row, col)),\n\t\t\tLEDGrid.getDistance((self.row_count, self.col_count),\n\t\t\t\t\t    (row, col)))\n\n\tdef _update_color(self, color_triple):\n\t\t\"\"\"Update the color of all LEDs.\n\n\t\tArgs:\n\t\t  color_triple: A (red, green, blue) color tuple.\n\t\t\"\"\"\n\t\tfor row in range(self._led_grid.row_count):\n\t\t\tfor col in range(self._led_grid.col_count):\n\t\t\t\tself._led_grid.getLED(row, col).color_triple = color_triple\n\t\tself._red = self.inputs.red\n\t\tself._green = self.inputs.green\n\t\tself._blue = self.inputs.blue\n\n\tdef _update_non_heart_leds(self):\n\t\t\"\"\"Update all LEDs that are not the heart LED.\n\n\t\tThis function iterates through all LEDs, and adjusts their\n\t\tbrightness based on the distance to the heart.\n\n\t\t\"\"\"\n\t\tfor row in range(self._led_grid.row_count):\n\t\t\tfor col in range(self._led_grid.col_count):\n\t\t\t\tdistance = LEDGrid.getDistance(\n\t\t\t\t\t(row, col),\n\t\t\t\t\t(self._heart_row, self._heart_col))\n\t\t\t\t# Use the larger of the two dimensional\n\t\t\t\t# values.\n\t\t\t\tmin_dist = max(distance[0], distance[1])\n\t\t\t\tnew_brightness = (self._heart_led.brightness -\n\t\t\t\t\t\t  min_dist*self.DISTANCE_DIFF)\n\t\t\t\tnew_brightness = max(0, new_brightness)\n\t\t\t\tself._led_grid.getLED(row, col).brightness = new_brightness\n\n\tdef _update_leds(self):\n\t\t\"\"\"Update all LEDs.\n\n\t\tThis function determines the new heart brightness value, then\n\t\tupdates all LEDs accordingly.\n\t\t\"\"\"\n\t\tlogging.debug('HeartBeat._update_leds() called.')\n\t\t# Iterate through precomputed values, start at the beginning\n\t\t# when done.\n\t\tif self._led_values_index < self._led_values_last_index:\n\t\t\tself._led_values_index += 1\n\t\telse:\n\t\t\tself._led_values_index = 0\n\t\t# Determine the brightness based on the precomputed value and\n\t\t# the max_brightness multiplier.\n\t\tbrightness = self._led_values[self._led_values_index]\n\t\tself._heart_led.brightness = brightness\n\t\tself._update_non_heart_leds()\n\n\tdef triggerStep(self, *args):\n\t\t\"\"\"Run one step in the pattern.\n\n\t\tIf input values have been updated, also update the local values\n\t\tand recompute the heart brightness values.\n\n\t\t\"\"\"\n\t\tif self.inputs.triggerStep and self.sequenceTriggered:\n\t\t\tlogging.debug('HeartBeat.triggerStep() called.')\n\t\t\t# HACK: As mentioned above, we assume a single row and\n\t\t\t# only update the column of the heart position\n\t\t\tif self._heart_col != self.inputs.heart_pos:\n\t\t\t\tself._update_heart_position(\n\t\t\t\t\tself._heart_row, self.inputs.heart_pos)\n\t\t\t# Update brightness based on input values.\n\t\t\tif self._max_brightness != self.inputs.max_brightness:\n\t\t\t\tself._max_brightness = self.inputs.max_brightness\n\t\t\t\tself._led_values = self._compute_heart_values()\n\t\t\tif ((self._red != self.inputs.red) or\n\t\t\t    (self._green != self.inputs.green) or\n\t\t\t    (self._blue != 
self.inputs.blue)):\n\t\t\t\tself._update_color((self.inputs.red,\n\t\t\t\t\t\t    self.inputs.green,\n\t\t\t\t\t\t    self.inputs.blue))\n\t\t\tself._update_leds()\n\t\t\tself.requestUpdate()\n\n\tdef triggerSequence(self, *args):\n\t\tif self.inputs.triggerSequence:\n\t\t\tlogging.info('HeartBeat.triggerSequence() called.')\n\t\t\tself.inputs.doCommand(['triggerStep', 'refresh'])\n\t\t\tself.sequenceTriggered = True\n\n\tdef getState(self, row, col):\n\t\tif (row < 0) or (row >= self._led_grid.row_count):\n\t\t\treturn (0, 0, 0)\n\t\telif (col < 0) or (col >= self._led_grid.col_count):\n\t\t\treturn (0, 0, 0)\n\t\telse:\n\t\t\treturn self._led_grid.getLED(row, col).getColorWithBrightness()\n\n\nclass Image(object):\n\t\"\"\"A container class for Image data.\"\"\"\n\n\tdef __init__(self, name, path):\n\t\t# Name of the image.\n\t\tself.name = name\n\t\t# Path to the image\n\t\t#\n\t\t# TODO: specify whether it's an absolute or relative path.\n\t\tself.path = path\n\n\nclass FromImage(PatternBase):\n\t\"\"\"A pattern that generates data based on predefined images.\"\"\"\n\n\t# A map of image IDs to Image() objects.\n\tsupported_images = {\n\t\t\"0\": Image(\"image 1\", \"path/to/image1\"),\n\t\t\"1\": Image(\"image 2\", \"path/to/image2\"),\n\t\t\"2\": Image(\"image 3\", \"path/to/image3\"),\n\t\t}\n\n\tdef __init__(self, *args):\n\t\t# A dictionary of image IDs (as defined in 'supported_images'\n\t\t# above) to image names shown in the UI.\n\t\tchoices = {}\n\t\tfor image_id, image in self.supported_images.items():\n\t\t\tchoices[image_id] = image.name\n\t\t# A dictionary of input parameters rendered in the UI. Note\n\t\t# that those are used by PatternBase.__init__() to construct\n\t\t# self.inputs, an InputManager.InputCollection object.\n\t\t#\n\t\t# 'type' and 'subtype' are used to create the input name as\n\t\t# defined in Inputs.Basic.inputTypes.\n\t\tself.inputParams = {\n\t\t\t'updateButton' : {\n\t\t\t\t'descriptionInPattern' : 'update',\n\t\t\t\t'type' : 'pulse',\n\t\t\t\t'subType' : 'button',\n\t\t\t\t'bindToFunction' : 'updateButtonPressed',\n\t\t\t},\n\t\t\t'imageSelection' : {\n\t\t\t\t'descriptionInPattern' : 'Image used for LED input',\n\t\t\t\t'type' : 'text',\n\t\t\t\t'subType' : 'choice',\n\t\t\t\t'choices': choices,\n\t\t\t},\n\t\t}\n\t\tPatternBase.__init__(self, *args)\n\t\tself.patternName = 'From Image'\n\t\tself._current_image_id = \"0\"\n\n\tdef updateButtonPressed(self, *args):\n\t\t\"\"\"Change behavior when the user presses the update button.\"\"\"\n\t\t# Through the magic of the framework, user-provided input data\n\t\t# can be found in self.inputs.\n\t\t#\n\t\t# Use default in case user has not selected anything, yet.\n\t\tif not self.inputs.imageSelection:\n\t\t\tnew_image_id = self._current_image_id\n\t\telse:\n\t\t\tnew_image_id = self.inputs.imageSelection\n\t\tself._current_image_id = new_image_id\n\t\tlogger.info('now using image %s (%s, %s)',\n\t\t\t    new_image_id,\n\t\t\t    self.supported_images[new_image_id].name,\n\t\t\t    self.supported_images[new_image_id].path)\n\t\t# TODO: update image used for LED pattern generation.\n\t\tif self.inputs.updateButton:\n\t\t\tself.requestUpdate()\n\n\tdef getState(self, row, col):\n\t\t\"\"\"Return state of the LED at [row,col].\"\"\"\n\t\treturn self._current_image_id\n\n\tdef stop(self):\n\t\tPatternBase.stop(self)\n", "sub_path": "Patterns/LED.py", "file_name": "LED.py", "file_ext": "py", "file_size_in_byte": 15417, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", 
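FromImage above derives the UI choice list mechanically from its supported_images map, so registering a new image only touches that one dict. The same derivation in isolation (Image is the small container class defined just before FromImage):

supported_images = {
    "0": Image("image 1", "path/to/image1"),
    "1": Image("image 2", "path/to/image2"),
}
choices = {image_id: image.name for image_id, image in supported_images.items()}
# choices == {"0": "image 1", "1": "image 2"}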
"api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "Patterns.PatternBase.PatternBase", "line_number": 131, "usage_type": "name"}, {"api_name": "Patterns.PatternBase.PatternBase.__init__", "line_number": 241, "usage_type": "call"}, {"api_name": "Patterns.PatternBase.PatternBase", "line_number": 241, "usage_type": "name"}, {"api_name": "scipy.signal.wavelets.daub", "line_number": 289, "usage_type": "call"}, {"api_name": "scipy.signal.wavelets", "line_number": 289, "usage_type": "attribute"}, {"api_name": "scipy.signal", "line_number": 289, "usage_type": "name"}, {"api_name": "scipy.signal.resample", "line_number": 292, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 292, "usage_type": "name"}, {"api_name": "scipy.concatenate", "line_number": 294, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 366, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 387, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 408, "usage_type": "call"}, {"api_name": "Patterns.PatternBase.PatternBase", "line_number": 433, "usage_type": "name"}, {"api_name": "Patterns.PatternBase.PatternBase.__init__", "line_number": 469, "usage_type": "call"}, {"api_name": "Patterns.PatternBase.PatternBase", "line_number": 469, "usage_type": "name"}, {"api_name": "Patterns.PatternBase.PatternBase.stop", "line_number": 497, "usage_type": "call"}, {"api_name": "Patterns.PatternBase.PatternBase", "line_number": 497, "usage_type": "name"}]} +{"seq_id": "472477968", "text": "# -*- coding: utf-8 -*-\nimport scrapy, re\nfrom Scrapy_E_info_V1_01.items import InfoItem\nimport time\nfrom scrapy.utils import request\n\n\nclass CinicorgV1Spider(scrapy.Spider):\n name = 'cinicorg_V1'\n allowed_domains = ['www.cinic.org.cn']\n base_url = 'http://www.cinic.org.cn'\n url_name = '中国产业经济信息网'\n urls = {\n 'http://www.cinic.org.cn/hy/jz/index_2.html': '行业资讯4', # 每页10条\n }\n\n def start_requests(self):\n\n for url, cate in self.urls.items():\n page = cate[4:]\n for i in range(2, int(page)):\n link = url.replace(url[-6:-4], f'{i}.')\n yield scrapy.Request(url=link, callback=self.parse, meta={'cate': cate[:4]}, dont_filter=True)\n\n yield scrapy.Request(url='http://www.cinic.org.cn/hy/jz/index.html', callback=self.parse, meta={'cate': '行业资讯'},\n dont_filter=True)\n\n def parse(self, response):\n cate = response.meta['cate']\n config_list = response.xpath('//div[@class=\"col-l\"]/ul/li')\n for config in config_list:\n item = InfoItem()\n title_img = config.xpath('./div/div[@class=\"img\"]/a/img/@src').extract_first()\n title = config.xpath('./div/div[@class=\"txt\"]/h3/a/text()').extract_first()\n link = self.base_url + config.xpath('./div/div[@class=\"txt\"]/h3/a/@href').extract_first()\n issue_time = config.xpath(\n './div/div[@class=\"txt\"]/div/div/div/span[@class=\"sp2\"]/text()').extract_first()\n # tags = config.xpath('./div[@class=\"right\"]/ul/li/a/text()').extract()\n item['title'] = title\n item['issue_time'] = issue_time\n item['content_url'] = link\n item['information_categories'] = cate\n item['title_images'] = self.base_url + title_img if title_img else None\n req = scrapy.Request(url=link, callback=self.parse2,\n meta={'item': item},\n dont_filter=True)\n item['id'] = request.request_fingerprint(req)\n yield req\n\n def parse2(self, response):\n item = response.meta['item']\n content = response.xpath('//div[@class=\"dc-ccm1\"]').extract_first()\n images = response.xpath('//div[@class=\"dc-ccm1\"]//img/@src').extract()\n if 
images:\n images_url = []\n for img in images:\n if 'http' in img:\n images_url.append(img)\n else:\n image = f'{self.base_url}{img}'\n images_url.append(image)\n images_urls = '; '.join(images_url)\n item['images'] = images_urls if images_urls else None\n else:\n item['images'] = None\n item['tags'] = None\n item['industry_categories'] = 'E'\n item['industry_Lcategories'] = '47'\n item['industry_Mcategories'] = None\n item['industry_Scategories'] = None\n item['sign'] = '19'\n item['update_time'] = str(int(time.time() * 1000))\n item['information_source'] = '中国产业经济信息网'\n try:\n source = response.xpath('//div[@class=\"col-l\"]/div[1]/center/text()').extract_first()\n source = re.search(r'来源:(.+)时间', source).group(1).strip()\n item['source'] = source if source else '中国产业经济信息网'\n except:\n item['source'] = None\n item['area'] = None\n item['address'] = None\n item['attachments'] = None\n # item['images'] = None\n # author = response.xpath('//div[@class=\"info_title\"]/span[3]/text()').extract_first()[3:].strip()\n item['author'] = None\n item['content'] = content\n if content:\n yield item\n self.logger.info(\"title({}), issue_time({})\".format(item['title'], item['issue_time']))\n\n\nif __name__ == '__main__':\n from scrapy import cmdline\n\n cmdline.execute(['scrapy', 'crawl', 'cinicorg_V1'])\n", "sub_path": "info_spider/Scrapy_E_info_V1_01/Scrapy_E_info_V1_01/spiders/cinicorg_V1.py", "file_name": "cinicorg_V1.py", "file_ext": "py", "file_size_in_byte": 4024, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "scrapy.Spider", "line_number": 8, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 23, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 25, "usage_type": "call"}, {"api_name": "Scrapy_E_info_V1_01.items.InfoItem", "line_number": 32, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 44, "usage_type": "call"}, {"api_name": "scrapy.utils.request.request_fingerprint", "line_number": 47, "usage_type": "call"}, {"api_name": "scrapy.utils.request", "line_number": 47, "usage_type": "name"}, {"api_name": "time.time", "line_number": 72, "usage_type": "call"}, {"api_name": "re.search", "line_number": 76, "usage_type": "call"}, {"api_name": "scrapy.cmdline.execute", "line_number": 95, "usage_type": "call"}, {"api_name": "scrapy.cmdline", "line_number": 95, "usage_type": "name"}]} +{"seq_id": "92012359", "text": "from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic.edit import UpdateView\nfrom django.views.generic import ListView,DetailView\nfrom django.shortcuts import *\nfrom .models import profile, followers, following\nfrom .forms import LoginForm, SignupForm, ProfileForm,PasswordChangeForm\nfrom django.contrib.auth.models import User\nfrom django.views import View\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout,update_session_auth_hash\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse_lazy\nfrom posts.models import posts,like,comment\nfrom django.db.models import Q\n\n\n\nclass SignupView(View):\n def get(self,request,*args,**kwargs):\n userform=SignupForm\n return render(request, template_name='accounts/signup_form.html', context={'userform':userform})\n def post ( self, request, *args, **kwargs ):\n form=SignupForm(request.POST)\n if form.is_valid():\n 
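start_requests in the spider above builds page URLs by replacing the two characters url[-6:-4] ('2.') with the page number, which only works while the key URL keeps exactly that shape. A sketch of the same pagination done by plain formatting (URL pattern taken from the listing itself):

base = 'http://www.cinic.org.cn/hy/jz/index_{}.html'
page_count = 4                          # parsed from the category suffix
links = [base.format(i) for i in range(2, page_count)]
links.append('http://www.cinic.org.cn/hy/jz/index.html')  # page 1 has no suffix
# Each link can then be fed to scrapy.Request exactly as above.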
user=User.objects.create_user(**form.cleaned_data)\n user.save()\n profile_form=ProfileForm(data=None)\n profiles=profile_form.save(commit=False)\n profiles.user_id=user\n profiles.save()\n return redirect(\"users:login_form\")\n\nclass LoginView(View):\n def get(self,request,*args,**kwargs):\n loginform=LoginForm\n return render(request, template_name='accounts/login_form.html', context={'loginform':loginform})\n def post ( self, request, *args, **kwargs ):\n form=LoginForm(request.POST)\n if form.is_valid ():\n user = authenticate ( username=form.cleaned_data['username'],\n password=form.cleaned_data['password'] )\n if user !=None:\n login(request,user)\n return redirect(\"posts:feed\")\n return redirect(\"users:login_form\")\n\n\nclass LogOutView(View):\n def get( self, request: object ) -> object:\n logout(request)\n return redirect(\"users:login_form\")\n\nclass ListAccountView(LoginRequiredMixin,ListView):\n login_url = '/users/login/'\n context_object_name = 'userform'\n def get(self, request, *args, **kwargs):\n userform = list(User.objects.values('id','first_name','username','email').all().filter(id=kwargs['pk']))\n userform = userform + list(User.objects.values('id','first_name','username','email').all().filter(is_superuser=0).filter(~Q(id=kwargs['pk'])))\n for element in userform:\n element['profile']=profile.objects.all().filter(user_id=element['id'])[0]\n element['followers']=following.objects.all().filter(following_id=element['profile']).count()\n element['following'] = following.objects.all().filter(user_id=element['id']).count()\n element['is_followed']= following.objects.all().filter(user_id=request.user,following_id=element['profile']).count()\n return render(request, template_name='accounts/account_list.html',context={'userform':userform,'id':int(request.user.id),'title':\"List of users\"})\n\n\nclass DetailAccountView(LoginRequiredMixin,DetailView):\n login_url = '/users/login/'\n context_object_name = 'userform'\n def get(self, request, *args, **kwargs):\n userform = User.objects.values('id', 'first_name', 'username', 'email').filter(id=kwargs['pk'])[0]\n profileform = profile.objects.all().filter(user_id=userform['id'])[0]\n followers_count = following.objects.all().filter(following_id=profileform).count()\n following_count = following.objects.filter(user_id=userform['id']).count()\n connected=following.objects.filter(user_id=request.user,following_id=profileform).count()\n total_posts = posts.objects.all().filter(user_id=userform['id'])\n userform['profile']=profileform\n userform['followers']=followers_count\n userform['following']=following_count\n userform['posts']=total_posts\n userform['post_count']=total_posts.count()\n return render(request, template_name='accounts/account_detail.html',\n context={'userform': userform, 'id': int(request.user.id), 'connected': int(connected)})\n\nclass UpdateAccountView(LoginRequiredMixin,UpdateView):\n login_url = '/users/login/'\n model = profile\n template_name='accounts/add_user_profile.html'\n def get(self, request, *args, **kwargs):\n if int(request.user.id) != int(kwargs['pk']):\n return redirect(\"users:update\", request.user.id)\n myprofile = profile.objects.get(user_id=request.user.id)\n form = ProfileForm(instance=myprofile)\n details=profile.objects.all().filter(user_id=request.user.id)[0]\n return render(request, self.template_name, {'profiledetails': details,'profileform': form,'username': str(request.user)})\n def post(self, request, *args, **kwargs):\n instance = profile.objects.get(user_id=request.user.id)\n 
form = ProfileForm(request.POST,request.FILES, instance=instance)\n        if form.is_valid():\n            form.save()\n            return redirect(\"users:profile\", request.user.id)\n        return redirect('users:profile', request.user.id)\n\n\nclass FollowersListView(LoginRequiredMixin,ListView):\n    login_url = '/users/login/'\n    def get(self, request, *args, **kwargs):\n        profile_id=profile.objects.all().filter(user_id__id=kwargs['pk'])[0]\n        list_of_users=list(item['user_id'] for item in list(following.objects.values('user_id').filter(following_id=profile_id)))\n        userform = list(User.objects.values('id', 'first_name', 'username', 'email').filter(id__in=list_of_users))\n        for element in userform:\n            element['profile'] = profile.objects.all().filter(user_id_id=element['id'])[0]\n            element['followers'] = following.objects.all().filter(following_id=element['profile']).count()\n            element['following'] = following.objects.all().filter(user_id=element['id']).count()\n            element['is_followed'] = following.objects.all().filter(user_id=request.user,\n                                                                    following_id=element['profile']).count()\n        return render(request, template_name='accounts/account_list.html',\n                      context={'title':\"List of followers\", 'userform': userform, 'id': int(request.user.id)})\n\n\nclass FollowingListView(LoginRequiredMixin,ListView):\n    login_url = '/users/login/'\n    def get(self, request, *args, **kwargs):\n        user_id = User.objects.all().filter(id=kwargs['pk'])[0]\n        list_of_users = list(\n            item['following_id'] for item in list(following.objects.values('following_id').filter(user_id=user_id)))\n        userform = list(User.objects.values('id', 'first_name', 'username', 'email').filter(profile__id__in=list_of_users))\n        for element in userform:\n            element['profile'] = profile.objects.all().filter(user_id_id=element['id'])[0]\n            element['followers'] = following.objects.all().filter(following_id=element['profile']).count()\n            element['following'] = following.objects.all().filter(user_id=element['id']).count()\n            element['is_followed'] = 1\n        return render(request, template_name='accounts/account_list.html',\n                      context={'title':\"List of following\",'userform': userform, 'id': int(request.user.id)})\n\n\n@login_required\ndef change_password(request, *args, **kwargs):\n    if request.method == 'POST':\n        form = PasswordChangeForm(request.user, request.POST)\n        if form.is_valid():\n            user = form.save()\n            update_session_auth_hash(request, user)  # Important!\n            messages.success(request, 'Your password was successfully updated!')\n            return redirect('change_password')\n        else:\n            messages.error(request, 'Please correct the error below.')\n    else:\n        form = PasswordChangeForm(request.user)\n    return render(request, 'accounts/change_password.html', {\n        'form': form\n    })\n\n@login_required\ndef follow_view(request, *args, **kwargs):\n    try:\n        follower = User.objects.get(id=request.user.id)\n        followings = profile.objects.get(user_id=kwargs['pk'])\n    except User.DoesNotExist:\n        messages.warning(\n            request,\n            'Requested user is not a registered user.'\n        )\n        return HttpResponseRedirect(reverse_lazy('home'))\n    if str(follower) == str(followings):\n        messages.warning(\n            request,\n            'You cannot follow yourself.'\n        )\n    else:\n        _, created = following.objects.get_or_create(\n            user_id=follower,\n            following_id=followings\n        )\n        if (created):\n            messages.success(\n                request,\n                'You\\'ve successfully followed.'\n            )\n        else:\n            messages.warning(\n                request,\n                'You\\'ve already followed.'\n            )\n    return HttpResponseRedirect(\n        reverse_lazy(\n            'users:profile',\n            kwargs={'pk': int(request.user.id)}\n        )\n    )\n\n\n@login_required\ndef 
unfollow_view(request, *args, **kwargs):\n    try:\n        follower = User.objects.get(id=request.user.id)\n        followings = profile.objects.get(user_id=kwargs['pk'])\n        if str(follower) == str(followings):\n            messages.warning(\n                request,\n                'You cannot unfollow yourself.'\n            )\n        else:\n            status=following.objects.filter(user_id=follower,following_id=followings).delete()\n            print(status)\n            messages.success(\n                request,\n                'You\\'ve just unfollowed.'\n            )\n    except User.DoesNotExist:\n        messages.warning(\n            request,\n            'Requested user is not a registered user.'\n        )\n        return HttpResponseRedirect(reverse_lazy('users:profile',kwargs={'pk': int(request.user.id)}))\n    except followers.DoesNotExist:\n        messages.warning(\n            request,\n            'You didn\\'t follow this person.'\n        )\n    return HttpResponseRedirect(\n        reverse_lazy(\n            'users:profile',\n            kwargs={'pk': int(request.user.id)}\n        )\n    )\n\n", "sub_path": "users/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 10280, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.views.View", "line_number": 19, "usage_type": "name"}, {"api_name": "forms.SignupForm", "line_number": 21, "usage_type": "name"}, {"api_name": "forms.SignupForm", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 26, "usage_type": "name"}, {"api_name": "forms.ProfileForm", "line_number": 28, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 34, "usage_type": "name"}, {"api_name": "forms.LoginForm", "line_number": 36, "usage_type": "name"}, {"api_name": "forms.LoginForm", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 44, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 49, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 51, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 54, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 54, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.values", "line_number": 58, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 58, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.values", "line_number": 59, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 59, "usage_type": "call"}, {"api_name": "models.profile.objects.all", "line_number": 61, "usage_type": "call"}, {"api_name": "models.profile.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.profile", "line_number": 61, "usage_type": "name"}, {"api_name": "models.following.objects.all", "line_number": 62, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": 
"models.following", "line_number": 62, "usage_type": "name"}, {"api_name": "models.following.objects.all", "line_number": 63, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 63, "usage_type": "name"}, {"api_name": "models.following.objects.all", "line_number": 64, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 64, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 68, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 68, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.values", "line_number": 72, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 72, "usage_type": "name"}, {"api_name": "models.profile.objects.all", "line_number": 73, "usage_type": "call"}, {"api_name": "models.profile.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "models.profile", "line_number": 73, "usage_type": "name"}, {"api_name": "models.following.objects.all", "line_number": 74, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 74, "usage_type": "name"}, {"api_name": "models.following.objects.filter", "line_number": 75, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 75, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 75, "usage_type": "name"}, {"api_name": "models.following.objects.filter", "line_number": 76, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 76, "usage_type": "name"}, {"api_name": "posts.models.posts.objects.all", "line_number": 77, "usage_type": "call"}, {"api_name": "posts.models.posts.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "posts.models.posts", "line_number": 77, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 86, "usage_type": "name"}, {"api_name": "django.views.generic.edit.UpdateView", "line_number": 86, "usage_type": "name"}, {"api_name": "models.profile", "line_number": 88, "usage_type": "name"}, {"api_name": "models.profile.objects.get", "line_number": 93, "usage_type": "call"}, {"api_name": "models.profile.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.profile", "line_number": 93, "usage_type": "name"}, {"api_name": "forms.ProfileForm", "line_number": 94, "usage_type": "call"}, {"api_name": "models.profile.objects.all", "line_number": 95, "usage_type": "call"}, {"api_name": "models.profile.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "models.profile", "line_number": 95, "usage_type": "name"}, {"api_name": "models.profile.objects.get", "line_number": 98, "usage_type": "call"}, {"api_name": "models.profile.objects", "line_number": 98, "usage_type": "attribute"}, {"api_name": "models.profile", "line_number": 98, "usage_type": "name"}, {"api_name": "forms.ProfileForm", "line_number": 99, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 106, 
"usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 106, "usage_type": "name"}, {"api_name": "models.profile.objects.all", "line_number": 109, "usage_type": "call"}, {"api_name": "models.profile.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.profile", "line_number": 109, "usage_type": "name"}, {"api_name": "models.following.objects.values", "line_number": 110, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 110, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.values", "line_number": 111, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 111, "usage_type": "name"}, {"api_name": "models.profile.objects.all", "line_number": 113, "usage_type": "call"}, {"api_name": "models.profile.objects", "line_number": 113, "usage_type": "attribute"}, {"api_name": "models.profile", "line_number": 113, "usage_type": "name"}, {"api_name": "models.following.objects.all", "line_number": 114, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 114, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 114, "usage_type": "name"}, {"api_name": "models.following.objects.all", "line_number": 115, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 115, "usage_type": "name"}, {"api_name": "models.following.objects.all", "line_number": 116, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 116, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 122, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 122, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.all", "line_number": 125, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 125, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 125, "usage_type": "name"}, {"api_name": "models.following.objects.values", "line_number": 127, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 127, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.values", "line_number": 128, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 128, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 128, "usage_type": "name"}, {"api_name": "models.profile.objects.all", "line_number": 130, "usage_type": "call"}, {"api_name": "models.profile.objects", "line_number": 130, "usage_type": "attribute"}, {"api_name": "models.profile", "line_number": 130, "usage_type": "name"}, {"api_name": "models.following.objects.all", "line_number": 131, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 131, "usage_type": "name"}, {"api_name": "models.following.objects.all", "line_number": 132, "usage_type": 
"call"}, {"api_name": "models.following.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 132, "usage_type": "name"}, {"api_name": "forms.PasswordChangeForm", "line_number": 141, "usage_type": "call"}, {"api_name": "django.contrib.auth.update_session_auth_hash", "line_number": 144, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 145, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 145, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 148, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 148, "usage_type": "name"}, {"api_name": "forms.PasswordChangeForm", "line_number": 150, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 138, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 158, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 158, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 158, "usage_type": "name"}, {"api_name": "models.profile.objects.get", "line_number": 159, "usage_type": "call"}, {"api_name": "models.profile.objects", "line_number": 159, "usage_type": "attribute"}, {"api_name": "models.profile", "line_number": 159, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 160, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 160, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 161, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 161, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 165, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 165, "usage_type": "call"}, {"api_name": "models.following", "line_number": 166, "usage_type": "argument"}, {"api_name": "django.contrib.messages.warning", "line_number": 167, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 167, "usage_type": "name"}, {"api_name": "models.following.objects.get_or_create", "line_number": 172, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 172, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 172, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 177, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 177, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 182, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 182, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 186, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 187, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 155, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 197, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 197, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 197, "usage_type": "name"}, {"api_name": "models.profile.objects.get", "line_number": 198, "usage_type": "call"}, {"api_name": 
"models.profile.objects", "line_number": 198, "usage_type": "attribute"}, {"api_name": "models.profile", "line_number": 198, "usage_type": "name"}, {"api_name": "models.following", "line_number": 199, "usage_type": "argument"}, {"api_name": "django.contrib.messages.warning", "line_number": 200, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 200, "usage_type": "name"}, {"api_name": "models.following.objects.filter", "line_number": 205, "usage_type": "call"}, {"api_name": "models.following.objects", "line_number": 205, "usage_type": "attribute"}, {"api_name": "models.following", "line_number": 205, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 207, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 207, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 211, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 211, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 212, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 212, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 216, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 216, "usage_type": "call"}, {"api_name": "models.followers.DoesNotExist", "line_number": 217, "usage_type": "attribute"}, {"api_name": "models.followers", "line_number": 217, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 218, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 218, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 222, "usage_type": "call"}, {"api_name": "django.urls.reverse_lazy", "line_number": 223, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 194, "usage_type": "name"}]} +{"seq_id": "422874536", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 9 19:23:58 2018\nShochastic Gradient Descent\nMonteCarlo with Approximation\n@author: FPTShop\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Grid():\n def __init__(self, size, walls, end_states):\n self.size = size\n self.walls = walls\n self.end_states = end_states\n self.all_states = {}\n for i in range(size[0]):\n for j in range(size[1]):\n if (i,j) not in walls:\n self.all_states[(i,j)] = -0.2\n # \"hot sand\" grid, there is small punishment in each dress\n # therefore, the agent is prone to find the final state fast\n # end_states is the state that end the game, with reward/punishment\n for x in end_states:\n self.all_states[x] = end_states[x]\n self.policy = {}\n # initilize random action policy for each state\n for i in range(size[0]):\n for j in range(size[1]):\n if (i,j) not in walls and (i,j) not in end_states:\n self.policy[(i, j)] = np.random.randint(1,5)\n else:\n self.policy[(i, j)] = 0\n self.sa_history = []\n self.reward_history = []\n self.game_over = False\n \n def action(self, a):\n p = np.random.randn()\n if p > 0.5:\n return a\n else:\n n = np.random.randint(1,5)\n return n\n \n def move(self, a, i, j):\n self.sa_history.append((i,j))\n old_n = (i,j)\n n = (i,j)\n if a==1:\n if i>0 and (i-1, j) not in self.walls:\n n = (i-1, j) # up\n if a==2:\n if j0 and (i, j-1) not in self.walls:\n n = (i, j-1) # left\n # give overwhelming result if stay in one position\n if n == old_n:\n 
self.reward_history.append(-100.0)\n self.game_over = True\n else:\n self.reward_history.append(self.all_states[n])\n if n in self.end_states:\n self.game_over = True\n return n[0], n[1]\n \n def eval_policy(self, value):\n # evaluate best action to take by finding the adjacent state with best value\n for s in self.all_states:\n best_policy = 0\n best_value = -10000\n if s not in self.end_states:\n i = s[0]\n j = s[1]\n if i>0 and (i-1, j) not in self.walls:\n if value[(i-1, j)]>best_value:\n best_value = value[(i-1, j)]\n best_policy = 1\n if j<self.size[1]-1 and (i, j+1) not in self.walls:\n if value[(i, j+1)]>best_value:\n best_value = value[(i, j+1)]\n best_policy = 2\n if i<self.size[0]-1 and (i+1, j) not in self.walls:\n if value[(i+1, j)]>best_value:\n best_value = value[(i+1, j)]\n best_policy = 3\n if j>0 and (i, j-1) not in self.walls:\n if value[(i, j-1)]>best_value:\n best_value = value[(i, j-1)]\n best_policy = 4\n self.policy[s] = best_policy\n \n def reset(self):\n self.sa_history = []\n self.reward_history = []\n self.game_over = False\n \n# initiate grid\nsize = (3, 4)\nend_states = {}\nend_states[(0,3)] = 1.0\nend_states[(1,3)] = -1.0\nwalls = [(1,1)]\ngrid = Grid(size, walls, end_states)\n\ndef f(s):\n return np.array([s[0], s[1], s[0]*s[1], 1])\n\ndef game(grid, E, lr):\n G = []\n # start position!\n x, y = 0, 2\n while not grid.game_over:\n # Epsilon greedy\n p = np.random.randn()\n if p > E:\n x, y = grid.move(grid.action(grid.policy[(x,y)]), i = x, j = y)\n else:\n x, y = grid.move(grid.action(np.random.randint(1,5)), i = x, j = y)\n grid.reward_history.reverse()\n grid.sa_history.reverse()\n visited_state = []\n v = 0\n for n, s in enumerate(grid.sa_history):\n if s not in visited_state:\n v = grid.reward_history[n] + lr*v\n G.append(v)\n return grid.sa_history, G\n\n# initialize random weight\ntheta = np.random.random(4)\n\nALPHA = 0.01\nvalue = {} \nfor i in range(size[0]):\n for j in range(size[1]):\n value[(i,j)] = 0\ndeltas = []\n\nx, y = 0, 0\nt = 1.0\nfor i in range(5000):\n if i%100==0:\n t += 0.01\n # decaying\n alpha = ALPHA/t\n \n # epsilon = 0.05\n # learning rate = 0.2 (the lr parameter acts as the discount factor inside game())\n # Q-learning\n # game() returns the reversed sequence of states the agent has gone through and the return of each visited state\n states, G = game(grid, 0.05, 0.2)\n biggest_diff = 0\n \n for n,s in enumerate(states):\n x = f(s)\n # f(s) = np.array([s[0], s[1], s[0]*s[1], 1])\n # theta a weight randomly initialized\n # stochastic gradient descent\n V_hat = theta.dot(x)\n theta += alpha*2*(V_hat-G[n])*x\n old_value = value[s]\n new_value = theta.dot(x)\n biggest_diff = max(biggest_diff, abs(old_value-new_value))\n value[s] = new_value\n deltas.append(biggest_diff)\n \n # reevaluate the action policy for all states\n grid.eval_policy(value)\n grid.reset()\n \nplt.plot(deltas)\nplt.show()\n\nprint('-----------------------------')\nfor i in range(size[0]):\n v = ''\n for j in range(size[1]):\n if (i,j) not in end_states and (i,j) not in walls:\n v = theta.dot(f((i,j))) \n v = str(v) + '\\t'\n else:\n v = '0' + '\\t'\n print(v)\nprint('-----------------------------')\n", "sub_path": "SGD.py", "file_name": "SGD.py", "file_ext": "py", "file_size_in_byte": 5946, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "numpy.random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 44, "usage_type": 
"call"}, {"api_name": "numpy.random", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 137, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}]} +{"seq_id": "260664466", "text": "from __future__ import unicode_literals\nimport re\nimport unicodedata\nimport string\nfrom datetime import date, datetime\n\nfrom six import PY3\nfrom sqlalchemy.types import SchemaType, TypeDecorator, Enum\nimport dateutil.parser\n\n\nDATETIME_ISO_FORMAT = re.compile(\n '[0-9]{4}\\-[0-9]{2}\\-[0-9]{2}T[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\.[0-9]+')\n\n\ndef parse_json_with_datetime(d):\n \"\"\"\n converts iso formatted timestamps found as values in the dict d to datetime objects.\n\n >>> assert parse_json_with_datetime(dict(d='2012-12-12T20:12:12.12'))['d'].year\n \"\"\"\n res = {}\n for k, v in d.items():\n if isinstance(v, basestring) and DATETIME_ISO_FORMAT.match(v):\n v = dateutil.parser.parse(v)\n res[k] = v\n return res\n\n\ndef format_json(value):\n if isinstance(value, (date, datetime)):\n return value.isoformat()\n return value\n\n\ndef dict_append(d, k, v):\n \"\"\"\n Assumes d is a dictionary with lists as values. 
Appends v to the list for key k.\n\n >>> d = {}\n >>> dict_append(d, 1, 1)\n >>> assert d[1] == [1]\n >>> dict_append(d, 1, 2)\n >>> assert d[1] == [1, 2]\n \"\"\"\n if k in d:\n d[k].append(v)\n else:\n d[k] = [v]\n\n\ndef dict_merged(d, _filter=None, **kw):\n \"\"\"Updates dictionary d with the items passed as keyword parameters if the value\n passes _filter.\n\n >>> assert dict_merged(None, a=1) == {'a': 1}\n >>> assert dict_merged(None, a=1, _filter=lambda i: i != 1) == {}\n >>> assert dict_merged(None, a=None) == {}\n \"\"\"\n if not _filter:\n _filter = lambda s: s is not None\n d = d or {}\n for k, v in kw.items():\n if _filter(v):\n d[k] = v\n return d\n\n\nclass NoDefault(object):\n \"\"\"\n >>> assert repr(NoDefault())\n \"\"\"\n def __repr__(self):\n return '<NoDefault>'\n\nNO_DEFAULT = NoDefault()\n\n\ndef xmlchars(text):\n invalid = range(0x9)\n invalid.extend([0xb, 0xc])\n invalid.extend(range(0xe, 0x20))\n return re.sub('|'.join('\\\\x%0.2X' % i for i in invalid), '', text)\n\n\ndef format_size(num):\n \"\"\"http://stackoverflow.com/a/1094933\n \"\"\"\n for x in ['bytes', 'KB', 'MB', 'GB']:\n if num < 1024.0 and num > -1024.0:\n return \"%3.1f%s\" % (num, x)\n num /= 1024.0\n return \"%3.1f%s\" % (num, 'TB')\n\n\nclass UnicodeMixin(object):\n def __unicode__(self):\n \"\"\"\n :return: a human readable label for the object\n \"\"\"\n return '%s' % self # pragma: no cover\n\n def __str__(self):\n \"\"\"\n :return: a human readable label for the object, appropriately encoded (or not)\n \"\"\"\n if PY3:\n return self.__unicode__() # pragma: no cover\n return self.__unicode__().encode('utf-8')\n\n\n#\n# From \"The Enum Recipe\": http://techspot.zzzeek.org/2011/01/14/the-enum-recipe/\n#\nclass EnumSymbol(UnicodeMixin):\n \"\"\"Define a fixed symbol tied to a parent class.\"\"\"\n\n def __init__(self, cls_, name, value, description, *args):\n self.cls_ = cls_\n self.name = name\n self.value = value\n self.description = description\n self.args = args\n\n def __reduce__(self):\n \"\"\"Allow unpickling to return the symbol linked to the DeclEnum class.\"\"\"\n return getattr, (self.cls_, self.name) # pragma: no cover\n\n def __iter__(self):\n return iter([self.value, self.description])\n\n def __repr__(self):\n return \"<%s>\" % self.name\n\n def __unicode__(self):\n return self.value\n\n def __cmp__(self, other):\n return cmp(self.value, other.value)\n\n def __json__(self, request=None):\n return self.value\n\n\nclass EnumMeta(type):\n \"\"\"Generate new DeclEnum classes.\"\"\"\n\n def __init__(cls, classname, bases, dict_):\n cls._reg = reg = cls._reg.copy()\n for k, v in dict_.items():\n if isinstance(v, tuple):\n sym = reg[v[0]] = EnumSymbol(cls, k, *v)\n setattr(cls, k, sym)\n return type.__init__(cls, classname, bases, dict_)\n\n def __iter__(cls):\n return iter(sorted(cls._reg.values()))\n\n\nclass DeclEnum(object):\n \"\"\"Declarative enumeration.\"\"\"\n\n __metaclass__ = EnumMeta\n _reg = {}\n\n @classmethod\n def from_string(cls, value):\n try:\n return cls._reg[value]\n except KeyError:\n raise ValueError(\"Invalid value for %r: %r\" % (cls.__name__, value))\n\n @classmethod\n def values(cls):\n return cls._reg.keys()\n\n @classmethod\n def db_type(cls):\n return DeclEnumType(cls)\n\n\nclass DeclEnumType(SchemaType, TypeDecorator):\n def __init__(self, enum):\n self.enum = enum\n self.impl = Enum(\n *enum.values(),\n name=\"ck%s\" % re.sub(\n '([A-Z])', lambda m: \"_\" + m.group(1).lower(), enum.__name__))\n\n def _set_table(self, table, column):\n self.impl._set_table(table, column) 
# pragma: no cover\n\n def copy(self):\n return DeclEnumType(self.enum) # pragma: no cover\n\n def process_bind_param(self, value, dialect):\n if value is None:\n return None\n return value.value\n\n def process_result_value(self, value, dialect):\n if value is None:\n return None\n return self.enum.from_string(value.strip())\n\n\n#def flatten_dict(d, parent_key='', sep='_'):\n# items = []\n# for k, v in d.items():\n# new_key = parent_key + sep + k if parent_key else k\n# if isinstance(v, collections.MutableMapping):\n# items.extend(flatten_dict(v, parent_key=new_key, sep=sep).items())\n# else:\n# items.append((new_key, v))\n# return dict(items)\n\n\n# Standard abbreviations according to the Leipzig Glossing Rules\n# see http://www.eva.mpg.de/lingua/resources/glossing-rules.php\nLGR_ABBRS = {\n 'A': 'agent-like argument of canonical transitive verb',\n 'ABL': 'ablative',\n 'ABS': 'absolutive',\n 'ACC': 'accusative',\n 'ADJ': 'adjective',\n 'ADV': 'adverb(ial)',\n 'AGR': 'agreement',\n 'ALL': 'allative',\n 'ANTIP': 'antipassive',\n 'APPL': 'applicative',\n 'ART': 'article',\n 'AUX': 'auxiliary',\n 'BEN': 'benefactive',\n 'CAUS': 'causative',\n 'CLF': 'classifier',\n 'COM': 'comitative',\n 'COMP': 'complementizer',\n 'COMPL': 'completive',\n 'COND': 'conditional',\n 'COP': 'copula',\n 'CVB': 'converb',\n 'DAT': 'dative',\n 'DECL': 'declarative',\n 'DEF': 'definite',\n 'DEM': 'demonstrative',\n 'DET': 'determiner',\n 'DIST': 'distal',\n 'DISTR': 'distributive',\n 'DU': 'dual',\n 'DUR': 'durative',\n 'ERG': 'ergative',\n 'EXCL': 'exclusive',\n 'F': 'feminine',\n 'FOC': 'focus',\n 'FUT': 'future',\n 'GEN': 'genitive',\n 'IMP': 'imperative',\n 'INCL': 'inclusive',\n 'IND': 'indicative',\n 'INDF': 'indefinite',\n 'INF': 'infinitive',\n 'INS': 'instrumental',\n 'INTR': 'intransitive',\n 'IPFV': 'imperfective',\n 'IRR': 'irrealis',\n 'LOC': 'locative',\n 'M': 'masculine',\n 'N': 'neuter',\n 'N-': 'non- (e.g. 
NSG nonsingular, NPST nonpast)',\n 'NEG': 'negation, negative',\n 'NMLZ': 'nominalizer/nominalization',\n 'NOM': 'nominative',\n 'OBJ': 'object',\n 'OBL': 'oblique',\n 'P': 'patient-like argument of canonical transitive verb',\n 'PASS': 'passive',\n 'PFV': 'perfective',\n 'PL': 'plural',\n 'POSS': 'possessive',\n 'PRED': 'predicative',\n 'PRF': 'perfect',\n 'PRS': 'present',\n 'PROG': 'progressive',\n 'PROH': 'prohibitive',\n 'PROX': 'proximal/proximate',\n 'PST': 'past',\n 'PTCP': 'participle',\n 'PURP': 'purposive',\n 'Q': 'question particle/marker',\n 'QUOT': 'quotative',\n 'RECP': 'reciprocal',\n 'REFL': 'reflexive',\n 'REL': 'relative',\n 'RES': 'resultative',\n 'S': 'single argument of canonical intransitive verb',\n 'SBJ': 'subject',\n 'SBJV': 'subjunctive',\n 'SG': 'singular',\n 'TOP': 'topic',\n 'TR': 'transitive',\n 'VOC': 'vocative',\n}\n\n\ndef slug(s, remove_whitespace=True, lowercase=True):\n \"\"\"\n :return: A condensed version of the string s, containing only lowercase alphanumeric \\\n characters.\n \"\"\"\n res = ''.join((c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'))\n if lowercase:\n res = res.lower()\n for c in string.punctuation:\n res = res.replace(c, '')\n res = re.sub('\\s+', '' if remove_whitespace else ' ', res)\n res = res.encode('ascii', 'ignore').decode('ascii')\n assert re.match('[ A-Za-z0-9]*$', res)\n return res\n\n\ndef encoded(string, encoding='utf8'):\n assert isinstance(string, basestring)\n return string.encode(encoding) if isinstance(string, unicode) else string\n\n\nclass cached_property(object):\n \"\"\"Decorator for read-only properties evaluated only once.\n\n It can be used to create a cached property like this::\n\n import random\n\n # the class containing the property must be a new-style class\n class MyClass(object):\n # create property whose value is cached\n @cached_property()\n def randint(self):\n # will only be evaluated once.\n return random.randint(0, 100)\n\n The value is cached in the '_cache' attribute of the object instance that\n has the property getter method wrapped by this decorator. The '_cache'\n attribute value is a dictionary which has a key for every property of the\n object which is wrapped by this decorator. Each entry in the cache is\n created only when the property is accessed for the first time and is the last\n computed property value.\n\n To expire a cached property value manually just do::\n\n del instance._cache[<property name>]\n\n inspired by the recipe by Christopher Arndt in the PythonDecoratorLibrary\n\n >>> import random\n >>> class C(object):\n ... @cached_property()\n ... def attr(self):\n ... 
return random.randint(1, 100000)\n ...\n >>> c = C()\n >>> call1 = c.attr\n >>> assert call1 == c.attr\n >>> del c._cache['attr']\n >>> assert call1 != c.attr\n \"\"\"\n def __call__(self, fget):\n self.fget = fget\n self.__doc__ = fget.__doc__\n self.__name__ = fget.__name__\n self.__module__ = fget.__module__\n return self\n\n def __get__(self, inst, owner):\n if not hasattr(inst, '_cache'):\n inst._cache = {}\n if self.__name__ not in inst._cache:\n inst._cache[self.__name__] = self.fget(inst)\n return inst._cache[self.__name__]\n", "sub_path": "clld/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 10482, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "dateutil.parser.parser.parse", "line_number": 25, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 25, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 25, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 83, "usage_type": "call"}, {"api_name": "six.PY3", "line_number": 107, "usage_type": "name"}, {"api_name": "sqlalchemy.types.SchemaType", "line_number": 182, "usage_type": "name"}, {"api_name": "sqlalchemy.types.TypeDecorator", "line_number": 182, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Enum", "line_number": 185, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 187, "usage_type": "call"}, {"api_name": "unicodedata.normalize", "line_number": 310, "usage_type": "call"}, {"api_name": "unicodedata.category", "line_number": 311, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 314, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 316, "usage_type": "call"}, {"api_name": "re.match", "line_number": 318, "usage_type": "call"}, {"api_name": "string.encode", "line_number": 324, "usage_type": "call"}]} +{"seq_id": "26433804", "text": "# Copyright (c) 2020 fortiss GmbH\n#\n# Authors: Patrick Hart, Julian Bernhard, Klemens Esterle,\n# Tobias Kessler and Mansoor Nasir\n#\n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n\n# The code is adapted from opensource implementation - https://github.com/ku2482/fqf-iqn-qrdqn.pytorch\n# MIT License - Copyright (c) 2020 Toshiki Watanabe\n\nimport os\nimport logging\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch\nimport numpy as np\nimport pickle\nimport os\nfrom abc import ABC, abstractmethod\n\n# BARK-ML imports\nfrom bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils \\\n import RunningMeanStats, LinearAnneaer\nfrom bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.memory \\\n import LazyMultiStepMemory, LazyPrioritizedDemMultiStepMemory, LazyPrioritizedMultiStepMemory\nfrom bark_ml.behaviors.discrete_behavior import BehaviorDiscreteMacroActionsML\n\n# TODO: Imports to remove\nfrom bark.runtime.commons.parameters import ParameterServer\n\n\n# BARK imports\nfrom bark.core.models.behavior import BehaviorModel\n\ndef to_pickle(obj, dir, file):\n path = os.path.join(dir, file)\n with open(path, 'wb') as handle:\n pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\ndef from_pickle(dir, file):\n path = os.path.join(dir, file)\n with open(path, 'rb') as handle:\n obj = pickle.load(handle)\n return obj\n\nclass 
TrainingBenchmark:\n def __init__(self):\n self.training_env = None\n self.num_episodes = None\n self.max_episode_steps = None\n self.agent = None\n\n def reset(self, training_env, num_eval_steps, max_episode_steps, agent):\n self.training_env = training_env\n self.num_eval_steps = num_eval_steps\n self.max_episode_steps = max_episode_steps\n self.agent = agent\n\n def run(self):\n # returns dict with evaluated metrics\n num_episodes = 0\n num_steps = 0\n total_return = 0.0\n\n while True:\n state = self.training_env.reset()\n episode_steps = 0\n episode_return = 0.0\n done = False\n while (not done) and episode_steps <= self.max_episode_steps:\n if self.agent.is_random(eval=True):\n action = self.agent.explore()\n else:\n action = self.agent.Act(state)\n\n next_state, reward, done, _ = self.training_env.step(action)\n num_steps += 1\n episode_steps += 1\n episode_return += reward\n state = next_state\n\n num_episodes += 1\n total_return += episode_return\n\n if num_steps > self.num_eval_steps:\n break\n\n mean_return = total_return / num_episodes\n return {\"mean_return\" : mean_return}, f\"Mean return: {mean_return}\"\n\n def is_better(self, eval_result1, than_eval_result2):\n return eval_result1[\"mean_return\"] > than_eval_result2[\"mean_return\"]\n\n\n\n\nclass BaseAgent(BehaviorModel):\n def __init__(self, agent_save_dir=None, env=None, params=None, training_benchmark=None, checkpoint_load=None,\n is_learn_from_demonstrations=False, is_checkpoint_run=False, is_be_obs=False, is_common_taus=False,\n is_online_demo=False):\n BehaviorModel.__init__(self, params)\n self._params = params\n self._env = env\n self._training_benchmark = training_benchmark or TrainingBenchmark()\n self._agent_save_dir = agent_save_dir\n self.is_learn_from_demonstrations = is_learn_from_demonstrations\n self.is_checkpoint_run = is_checkpoint_run\n self.is_common_taus = is_common_taus\n self.is_online_demo = is_online_demo\n if not checkpoint_load and params:\n if not env:\n raise ValueError(\"Environment must be passed for initialization\")\n self.reset_params(self._params)\n self.reset_action_observer(env)\n self.init_always()\n self.reset_training_variables()\n elif checkpoint_load:\n self.reset_params(self._params)\n if is_learn_from_demonstrations:\n self.reset_action_observer(env)\n if is_online_demo:\n self.load_only_memory_pickle(agent_save_dir)\n else:\n self.load_pickable_members(agent_save_dir)\n self.init_always()\n self.load_models(BaseAgent.check_point_directory(agent_save_dir, checkpoint_load) \\\n if checkpoint_load==\"best\" else BaseAgent.check_point_directory(agent_save_dir, checkpoint_load) )\n self.reset_training_variables(is_online_demo=is_online_demo)\n else:\n raise ValueError(\"Unusual param combination for agent initialization.\")\n\n\n def init_always(self):\n self.device = torch.device(\"cuda\" if (self.use_cuda and torch.cuda.is_available()) else \"cpu\")\n\n self.writer = SummaryWriter(log_dir=BaseAgent.summary_dir(self.agent_save_dir))\n self.train_return = RunningMeanStats(self.summary_log_interval)\n\n if not os.path.exists(BaseAgent.summary_dir(self.agent_save_dir)):\n os.makedirs(BaseAgent.summary_dir(self.agent_save_dir))\n\n # NOTE: by default we do not want the action to be set externally\n # as this enables the agents to be plug and played in BARK.\n self._set_action_externally = False\n self._training_benchmark.reset(self._env, \\\n self.num_eval_steps, self.max_episode_steps, self)\n\n def reset_action_observer(self, env):\n self._observer = self._env._observer\n 
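# Aside (illustrative, not in the original source): after init_always() wires the benchmark via\n # _training_benchmark.reset(...), evaluate() can call run(), which returns a pair like\n # ({\"mean_return\": 0.8}, \"Mean return: 0.8\") for hypothetical rollouts; is_better() then\n # compares two such result dicts by their \"mean_return\" entries.\n 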
self._ml_behavior = self._env._ml_behavior\n\n def clean_pickables(self, pickables):\n del pickables[\"online_net\"]\n del pickables[\"target_net\"]\n del pickables[\"_env\"]\n del pickables[\"_training_benchmark\"]\n del pickables[\"device\"]\n del pickables[\"writer\"]\n\n\n def save_pickable_members(self, pickable_dir):\n if not os.path.exists(pickable_dir):\n os.makedirs(pickable_dir)\n pickables = dict(self.__dict__)\n self.clean_pickables(pickables)\n to_pickle(pickables, pickable_dir, \"agent_pickables\")\n\n def load_only_memory_pickle(self, agent_save_dir):\n logging.info(\"Pickling memory from: \" + BaseAgent.pickable_directory(agent_save_dir))\n pickables = from_pickle(BaseAgent.pickable_directory(agent_save_dir), \"agent_pickables\")\n self.__dict__['memory'] = pickables['memory']\n self._agent_save_dir = agent_save_dir\n\n def load_pickable_members(self, agent_save_dir):\n logging.info(\"Pickling agent from: \" + BaseAgent.pickable_directory(agent_save_dir))\n pickables = from_pickle(BaseAgent.pickable_directory(agent_save_dir), \"agent_pickables\")\n self.__dict__.update(pickables)\n self._agent_save_dir = agent_save_dir\n\n def reset_training_variables(self, is_online_demo=False):\n # Replay memory which is memory-efficient to store stacked frames.\n self.is_online_demo = is_online_demo\n if not self.is_learn_from_demonstrations:\n if self.use_per:\n beta_steps = (self.num_steps - self.start_steps) / \\\n self.update_interval\n self.memory = LazyPrioritizedMultiStepMemory(\n self.memory_size,\n self.observer.observation_space.shape,\n self.device,\n self.gamma,\n self.multi_step,\n beta_steps=beta_steps)\n else:\n self.memory = LazyMultiStepMemory(\n self.memory_size,\n self.observer.observation_space.shape,\n self.device,\n self.gamma,\n self.multi_step)\n else:\n # expect a learning from demonstrations setting, reset use_per to true\n self.use_per = True\n # do not reset memory if already loaded\n if not is_online_demo:\n beta_steps = (self.num_steps - self.start_steps) / \\\n self.update_interval\n # initially all memory expects only demo samples\n self.memory = LazyPrioritizedDemMultiStepMemory(\n self.memory_size,\n self.observer.observation_space.shape,\n self.device,\n self.gamma,\n self.multi_step,\n beta_steps=beta_steps,\n epsilon_demo=self.demonstrator_buffer_params[\"epsilon_demo\", \"\", 1.0],\n epsilon_alpha=self.demonstrator_buffer_params[\"epsilon_alpha\", \"\", 0.001],\n alpha=self.demonstrator_buffer_params[\"alpha\", \"\", 0.4],\n per_beta_steps=self.demonstrator_buffer_params[\"per_beta_steps\", \"\", 75000],\n per_beta=self.demonstrator_buffer_params[\"per_beta\", \"\", 0.6],\n demo_ratio=self.demonstrator_buffer_params[\"demo_ratio\", \"\", 1.0])\n\n self.steps = 0\n self.learning_steps = 0\n self.episodes = 0\n self.best_eval_results = None\n\n def reset_params(self, params):\n self.num_steps = params[\"ML\"][\"BaseAgent\"][\"NumSteps\", \"\", 5000000]\n self.batch_size = params[\"ML\"][\"BaseAgent\"][\"BatchSize\", \"\", 32]\n\n self.double_q_learning = params[\"ML\"][\"BaseAgent\"][\"Double_q_learning\", \"\", False]\n self.dueling_net = params[\"ML\"][\"BaseAgent\"][\"DuelingNet\", \"\", False]\n self.noisy_net = params[\"ML\"][\"BaseAgent\"][\"NoisyNet\", \"\", False]\n self.use_per = params[\"ML\"][\"BaseAgent\"][\"Use_per\", \"\", False]\n\n self.reward_log_interval = params[\"ML\"][\"BaseAgent\"][\"RewardLogInterval\", \"\", 5]\n self.summary_log_interval = params[\"ML\"][\"BaseAgent\"][\"SummaryLogInterval\", \"\", 100]\n self.eval_interval = 
params[\"ML\"][\"BaseAgent\"][\"EvalInterval\", \"\",\n 25000]\n self.num_eval_steps = params[\"ML\"][\"BaseAgent\"][\"NumEvalSteps\", \"\",\n 12500]\n self.gamma_n = params[\"ML\"][\"BaseAgent\"][\"Gamma\", \"\", 0.99] ** \\\n params[\"ML\"][\"BaseAgent\"][\"Multi_step\", \"\", 1]\n\n self.start_steps = params[\"ML\"][\"BaseAgent\"][\"StartSteps\", \"\", 5000]\n self.epsilon_train = LinearAnneaer(\n 1.0, params[\"ML\"][\"BaseAgent\"][\"EpsilonTrain\", \"\", 0.01],\n params[\"ML\"][\"BaseAgent\"][\"EpsilonDecaySteps\", \"\", 25000])\n self.epsilon_eval = params[\"ML\"][\"BaseAgent\"][\"EpsilonEval\", \"\",\n 0.001]\n self.update_interval = params[\"ML\"][\"BaseAgent\"][\"Update_interval\", \"\", 4]\n self.target_update_interval = params[\"ML\"][\"BaseAgent\"][\"TargetUpdateInterval\", \"\", 5000]\n self.max_episode_steps = params[\"ML\"][\"BaseAgent\"][\"MaxEpisodeSteps\", \"\", 10000]\n self.grad_cliping = params[\"ML\"][\"BaseAgent\"][\"GradCliping\", \"\", 5.0]\n\n self.memory_size = params[\"ML\"][\"BaseAgent\"][\"MemorySize\", \"\", 10**6]\n self.gamma = params[\"ML\"][\"BaseAgent\"][\"Gamma\", \"\", 0.99]\n self.multi_step = params[\"ML\"][\"BaseAgent\"][\"Multi_step\", \"\", 1]\n\n self.use_cuda = params[\"ML\"][\"BaseAgent\"][\"Cuda\", \"\", False]\n\n if self.is_learn_from_demonstrations:\n self.demonstrator_buffer_params = params.AddChild(\"ML\").AddChild(\"DemonstratorAgent\").AddChild(\"Buffer\")\n self.demonstrator_loss_params = params.AddChild(\"ML\").AddChild(\"DemonstratorAgent\").AddChild(\"Loss\")\n self.demonstrator_agent_params = params.AddChild(\"ML\").AddChild(\"DemonstratorAgent\").AddChild(\"Agent\")\n self.online_gradient_update_steps = self.demonstrator_agent_params[\"online_gradient_update_steps\", \"\", 75000]\n\n @property\n def observer(self):\n return self._observer\n\n @property\n def env(self):\n return self._env\n\n @property\n def ml_behavior(self):\n return self._ml_behavior\n\n @property\n def num_actions(self):\n return self.ml_behavior.action_space.n\n\n @property\n def agent_save_dir(self):\n return self._agent_save_dir\n\n def learn_from_demonstrations(self, demonstrations, learn_only=False, num_episodes=50000):\n if learn_only:\n self.demonstrations = demonstrations\n self.save(checkpoint_type=\"configured_with_demonstrations\")\n assert self.is_learn_from_demonstrations, \"Learn from demonstration params not set!\"\n assert self.demonstrations is not None, \"Run invoked incorrectly, demonstrations not found!\"\n\n # Extract and append demonstrations to memory\n self.load_demonstrations(demonstrations)\n\n self.train_on_demonstrations()\n\n # save trained online agent\n self.save(checkpoint_type=\"trained_only_demonstrations\")\n # if learn_only is False, load from previous training checkpoint, explore and learn\n else:\n # update agent dir to current agent dir\n self._agent_save_dir = os.path.join(self._params[\"Experiment\"][\"dir\"], \"agent\")\n self.writer = SummaryWriter(log_dir=BaseAgent.summary_dir(self.agent_save_dir))\n if not os.path.exists(BaseAgent.summary_dir(self.agent_save_dir)):\n os.makedirs(BaseAgent.summary_dir(self.agent_save_dir))\n logging.info(f\"Exploration learning DIR {self._agent_save_dir}\")\n logging.info(f\"Summaries DIR {BaseAgent.summary_dir(self._agent_save_dir)}\")\n ckp_dir = BaseAgent.check_point_directory(self._agent_save_dir, \"\")\n logging.info(f\"Checkpoints DIR {ckp_dir}\")\n pickle_dir = BaseAgent.pickable_directory(self._agent_save_dir)\n logging.info(f\"New Pickables at {pickle_dir}\")\n 
self.memory_size = self.memory.capacity\n self.memory.reset_offline(self.memory_size, self.observer.observation_space.shape, \n self.device,\n self.demonstrator_buffer_params[\"demo_ratio\"],\n per_beta_steps=self.demonstrator_buffer_params[\"per_beta_steps\"])\n logging.info(f\"Demo capacity: {self.memory.demo_capacity}/{self.memory.capacity}\")\n logging.info(f\"{self.memory._dn + 1} demo samples remaining...\")\n self.train_episodes(num_episodes=num_episodes)\n logging.info(f\"Total learning_steps/steps {self.learning_steps}/{self.steps}\")\n logging.info(f\"Self generated data last at {self.memory._an}\")\n self.save(checkpoint_type=\"trained_mixed_experiences\")\n\n def load_demonstrations(self, demonstrations):\n for demo in self.demonstrations:\n (state, action, reward, next_state, done, is_demo) = demo\n self.memory.append(state, action, reward, next_state, done, is_demo) \n\n def train_on_demonstrations(self):\n while True:\n self.train_step_interval(demo_only=True)\n logging.info(f\"Step {self.learning_steps} complete\")\n if self.learning_steps > self.online_gradient_update_steps:\n logging.info(f\"Initial gradient updates completed. Learning steps {self.learning_steps}\")\n break\n\n def train_episodes(self, num_episodes=50000):\n while True:\n self.train_episode()\n if self.episodes >= num_episodes:\n break\n\n def train(self):\n while True:\n self.train_episode()\n if self.steps > self.num_steps:\n break\n self.set_action_externally = True\n\n def is_update(self):\n return self.steps % self.update_interval == 0 \\\n and self.steps >= self.start_steps\n\n def is_random(self, eval=False):\n # Use e-greedy for evaluation.\n if self.steps < self.start_steps:\n return True\n if eval:\n return np.random.rand() < self.epsilon_eval\n if self.noisy_net:\n return False\n return np.random.rand() < self.epsilon_train.get()\n\n def update_target(self):\n self.target_net.load_state_dict(self.online_net.state_dict())\n\n def explore(self):\n # Act with randomness.\n action = self.ml_behavior.action_space.sample()\n return action\n\n @property\n def set_action_externally(self):\n return self._set_action_externally\n\n @set_action_externally.setter\n def set_action_externally(self, externally):\n self._set_action_externally = externally\n\n def ActionToBehavior(self, action):\n # NOTE: will either be set externally or internally\n self._action = action\n\n def Act(self, state):\n # Act without randomness.\n # state = torch.Tensor(state).unsqueeze(0).to(self._device).float()\n actions = self.calculate_actions(state).argmax().item()\n return actions\n\n def calculate_actions(self, state):\n # Act without randomness.\n state = torch.Tensor(state).unsqueeze(0).to(self.device).float()\n with torch.no_grad():\n actions = self.online_net(states=state) # pylint: disable=not-callable\n return actions\n\n def Plan(self, dt, observed_world):\n # NOTE: if training is enabled the action is set externally\n if not self.set_action_externally:\n observed_state = self.observer.Observe(observed_world)\n # if self.is_be_obs:\n # self.beliefs_info.append(self.observer.beliefs)\n action = self.Act(observed_state)\n self._action = action\n\n action = self._action\n # set action to be executed\n self._ml_behavior.ActionToBehavior(action)\n trajectory = self._ml_behavior.Plan(dt, observed_world)\n dynamic_action = self._ml_behavior.GetLastAction()\n # NOTE: BARK requires models to have trajectories of the past\n BehaviorModel.SetLastTrajectory(self, trajectory)\n BehaviorModel.SetLastAction(self, dynamic_action)\n 
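# Illustration (assumed semantics, not from the original file): during training the runtime\n # injects the choice via ActionToBehavior(action) before Plan(dt, observed_world) runs;\n # with _set_action_externally False (the default), the branch above recomputes the action\n # from self.observer.Observe(observed_world) instead.\n 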
return trajectory\n\n def save_beliefs_info(self, filename):\n import pandas as pd\n df = pd.DataFrame(self.beliefs_info)\n print(f\"Storing beliefs to {filename}\")\n df.to_pickle(filename)\n\n def learn(self):\n pass\n\n def Clone(self):\n return self\n\n @property\n def action_space(self):\n return self._ml_behavior.action_space\n\n @staticmethod\n def check_point_directory(agent_save_dir, checkpoint_type):\n return os.path.join(agent_save_dir, \"checkpoints/\", checkpoint_type)\n\n @staticmethod\n def pickable_directory(agent_save_dir):\n return os.path.join(agent_save_dir, \"pickable/\")\n\n @staticmethod\n def summary_dir(agent_save_dir):\n return os.path.join(agent_save_dir, \"summaries\")\n\n def save_models(self, checkpoint_dir):\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n torch.save(self.online_net.state_dict(),\n os.path.join(checkpoint_dir, 'online_net.pth'))\n torch.save(self.target_net.state_dict(),\n os.path.join(checkpoint_dir, 'target_net.pth'))\n online_net_script = torch.jit.script(self.online_net)\n online_net_script.save(os.path.join(checkpoint_dir, 'online_net_script.pt'))\n\n def save(self, checkpoint_type=\"last\"):\n self.save_models(BaseAgent.check_point_directory(self.agent_save_dir, checkpoint_type))\n self.save_pickable_members(BaseAgent.pickable_directory(self.agent_save_dir))\n\n def load_models(self, checkpoint_dir):\n try:\n self.online_net.load_state_dict(\n torch.load(os.path.join(checkpoint_dir, 'online_net.pth')))\n except RuntimeError:\n self.online_net.load_state_dict(\n torch.load(os.path.join(checkpoint_dir, 'online_net.pth'), map_location=torch.device('cpu')))\n try:\n self.target_net.load_state_dict(\n torch.load(os.path.join(checkpoint_dir, 'target_net.pth')))\n except RuntimeError:\n self.target_net.load_state_dict(\n torch.load(os.path.join(checkpoint_dir, 'target_net.pth'), map_location=torch.device('cpu')))\n\n def visualize(self, num_episodes=5):\n if not self.env:\n raise ValueError(\"No environment available for visualization. Was agent reloaded?\")\n for _ in range(0, num_episodes):\n state = self._env.reset()\n done = False\n while (not done):\n action = self.Act(state)\n next_state, reward, done, _ = self._env.step(action)\n self._env.render()\n state = next_state\n\n def train_episode(self):\n self.online_net.train()\n self.target_net.train()\n\n self.episodes += 1\n episode_return = 0.\n episode_steps = 0\n\n done = False\n state = self._env.reset()\n\n while (not done) and episode_steps <= self.max_episode_steps:\n # NOTE: Noises can be sampled only after self._learn(). 
However, I\n # sample noises before every action, which seems to lead better\n # performances.\n self.online_net.sample_noise()\n\n if self.is_random(eval=False):\n action = self.explore()\n else:\n action = self.Act(state)\n\n next_state, reward, done, _ = self._env.step(action)\n if self.episodes % self.reward_log_interval == 0:\n # self._env.render()\n logging.info(f\"Reward: {reward:<4}\")\n\n # To calculate efficiently, I just set priority=max_priority here.\n if self.is_learn_from_demonstrations:\n self.memory.append(state, action, reward, next_state, done, False)\n else:\n self.memory.append(state, action, reward, next_state, done)\n\n self.steps += 1\n episode_steps += 1\n episode_return += reward\n state = next_state\n\n self.train_step_interval()\n\n # We log running mean of stats.\n self.train_return.append(episode_return)\n\n # We log evaluation results along with training frames = 4 * steps.\n if self.episodes % self.summary_log_interval == 0:\n self.writer.add_scalar('return/train', self.train_return.get(),\n 4 * self.steps)\n\n logging.info(f'Episode: {self.episodes:<4} '\n f'episode steps: {episode_steps:<4} '\n f'return: {episode_return:<5.1f}')\n\n def train_step_interval(self, demo_only=False):\n if demo_only:\n self.online_net.train()\n self.target_net.train()\n self.steps += 1\n else:\n self.epsilon_train.step()\n if self.is_learn_from_demonstrations:\n self.memory.per_beta.step()\n\n if self.steps % self.target_update_interval == 0:\n self.update_target()\n\n if demo_only or self.is_update():\n self.learn()\n\n if self.steps % self.eval_interval == 0:\n self.evaluate()\n self.save(checkpoint_type='last')\n self.online_net.train()\n\n def evaluate(self):\n if not self._training_benchmark:\n logging.info(\"No evaluation performed since no training benchmark available.\")\n self.online_net.eval()\n\n eval_results, formatted_result = self._training_benchmark.run()\n\n if not self.best_eval_results or \\\n self._training_benchmark.is_better(eval_results, self.best_eval_results):\n self.best_eval_results = eval_results\n if self.is_learn_from_demonstrations and not self.is_online_demo:\n self.save(checkpoint_type='best_lfd')\n else:\n self.save(checkpoint_type='best')\n\n # We log evaluation results along with training frames = 4 * steps.\n for eval_result_name, eval_result in eval_results.items():\n if self.is_learn_from_demonstrations and not self.is_online_demo:\n self.writer.add_scalar(eval_result_name + \"_offline\", eval_result, 4 * self.steps)\n else:\n self.writer.add_scalar(eval_result_name, eval_result, 4 * self.steps)\n logging.info('-' * 60)\n logging.info('Evaluation result: {}'.format(formatted_result))\n logging.info('-' * 60)\n\n def __del__(self):\n self.writer.close()\n", "sub_path": "bark_ml/library_wrappers/lib_fqf_iqn_qrdqn/agent/base_agent.py", "file_name": "base_agent.py", "file_ext": "py", "file_size_in_byte": 22306, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 38, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 43, "usage_type": "call"}, {"api_name": 
"bark.core.models.behavior.BehaviorModel", "line_number": 97, "usage_type": "name"}, {"api_name": "bark.core.models.behavior.BehaviorModel.__init__", "line_number": 101, "usage_type": "call"}, {"api_name": "bark.core.models.behavior.BehaviorModel", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 134, "usage_type": "attribute"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 136, "usage_type": "call"}, {"api_name": "bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils.RunningMeanStats", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 163, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 169, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 175, "usage_type": "call"}, {"api_name": "bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.memory.LazyPrioritizedMultiStepMemory", "line_number": 187, "usage_type": "call"}, {"api_name": "bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.memory.LazyMultiStepMemory", "line_number": 195, "usage_type": "call"}, {"api_name": "bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.memory.LazyPrioritizedDemMultiStepMemory", "line_number": 209, "usage_type": "call"}, {"api_name": "bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils.LinearAnneaer", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 306, "usage_type": "call"}, {"api_name": "os.path", "line_number": 306, "usage_type": "attribute"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 307, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 308, "usage_type": "call"}, {"api_name": "os.path", "line_number": 308, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 309, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 310, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 311, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 313, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 315, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 321, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 322, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 324, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 325, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 336, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 363, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 366, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 396, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 397, "usage_type": "call"}, {"api_name": 
"bark.core.models.behavior.BehaviorModel.SetLastTrajectory", "line_number": 416, "usage_type": "call"}, {"api_name": "bark.core.models.behavior.BehaviorModel", "line_number": 416, "usage_type": "name"}, {"api_name": "bark.core.models.behavior.BehaviorModel.SetLastAction", "line_number": 417, "usage_type": "call"}, {"api_name": "bark.core.models.behavior.BehaviorModel", "line_number": 417, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 422, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 438, "usage_type": "call"}, {"api_name": "os.path", "line_number": 438, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 442, "usage_type": "call"}, {"api_name": "os.path", "line_number": 442, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 446, "usage_type": "call"}, {"api_name": "os.path", "line_number": 446, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 449, "usage_type": "call"}, {"api_name": "os.path", "line_number": 449, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 450, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 451, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 452, "usage_type": "call"}, {"api_name": "os.path", "line_number": 452, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 453, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 454, "usage_type": "call"}, {"api_name": "os.path", "line_number": 454, "usage_type": "attribute"}, {"api_name": "torch.jit.script", "line_number": 455, "usage_type": "call"}, {"api_name": "torch.jit", "line_number": 455, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 456, "usage_type": "call"}, {"api_name": "os.path", "line_number": 456, "usage_type": "attribute"}, {"api_name": "{'pd': 'pandas'}.check_point_directory", "line_number": 459, "usage_type": "call"}, {"api_name": "{'pd': 'pandas'}.pickable_directory", "line_number": 460, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 465, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 465, "usage_type": "call"}, {"api_name": "os.path", "line_number": 465, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path", "line_number": 468, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 468, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 471, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 471, "usage_type": "call"}, {"api_name": "os.path", "line_number": 471, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 474, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 474, "usage_type": "call"}, {"api_name": "os.path", "line_number": 474, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 474, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 513, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 536, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 563, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 582, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 583, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 584, "usage_type": "call"}]} +{"seq_id": "478456144", "text": "\"\"\"The learners module 
contains core classes and types for defining learner simulations.\n\nThis module contains the abstract interface expected for Learner implementations along\nwith a number of Learner implementations out of the box for testing and baseline comparisons.\n\"\"\"\n\nimport math\nimport collections\n\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Sequence, Tuple, Optional, Dict, cast, Generic, TypeVar, overload, Union, Callable\nfrom collections import defaultdict\n\nimport coba.vowpal as VW\n\nfrom coba.random import CobaRandom\nfrom coba.simulations import Context, Action, Reward, Choice, Key\nfrom coba.statistics import OnlineVariance\n\n_C_in = TypeVar('_C_in', bound=Context, contravariant=True)\n_A_in = TypeVar('_A_in', bound=Action , contravariant=True)\n\n_C_out = TypeVar('_C_out', bound=Context, covariant=True)\n_A_out = TypeVar('_A_out', bound=Action , covariant=True)\n\nclass Learner(Generic[_C_in, _A_in], ABC):\n \"\"\"The interface for Learner implementations.\"\"\"\n\n @property\n @abstractmethod\n def family(self) -> str:\n \"\"\"The family of the learner.\n\n This value is used for descriptive purposes only when creating benchmark results.\n \"\"\"\n ...\n\n @property\n @abstractmethod\n def params(self) -> Dict[str,Any]:\n \"\"\"The parameters used to initialize the learner.\n\n This value is used for descriptive purposes only when creating benchmark results.\n \"\"\"\n ...\n\n def init(self) -> None:\n \"\"\"An optional initialization method called once after pickling.\"\"\" \n pass\n\n @abstractmethod\n def choose(self, key: Key, context: _C_in, actions: Sequence[_A_in]) -> Choice:\n \"\"\"Choose which action to take.\n\n Args:\n key: A unique identifier for the interaction that the observed reward \n came from. This identifier allows learners to share information\n between the choose and learn methods while still keeping the overall \n learner interface consistent and clean.\n context: The current context. This argument will be None when playing \n a multi-armed bandit simulation and will contain context features \n when playing a contextual bandit simulation. Context features could \n be an individual number (e.g. 1.34), a string (e.g., \"hot\"), or a \n tuple of strings and numbers (e.g., (1.34, \"hot\")) depending on the \n simulation being played.\n actions: The current set of actions to choose from in the given context. \n Action sets can be lists of numbers (e.g., [1,2,3,4]), a list of \n strings (e.g. [\"high\", \"medium\", \"low\"]), or a list of lists such \n as in the case of movie recommendations (e.g., [[\"action\", \"oscar\"], \n [\"fantasy\", \"razzie\"]]).\n\n Returns:\n An integer indicating the index of the selected action in the action set.\n \"\"\"\n ...\n\n @abstractmethod\n def learn(self, key: Key, context: _C_in, action: _A_in, reward: Reward) -> None:\n \"\"\"Learn about the result of an action that was taken in a context.\n\n Args:\n key: A unique identifier for the interaction that the observed reward \n came from. This identifier allows learners to share information\n between the choose and learn methods while still keeping the overall \n learner interface consistent and clean.\n context: The current context. This argument will be None when playing \n a multi-armed bandit simulation and will contain context features \n when playing a contextual bandit simulation. Context features could \n be an individual number (e.g. 
1.34), a string (e.g., \"hot\"), or a \n tuple of strings and numbers (e.g., (1.34, \"hot\")) depending on the \n simulation being played.\n action: The action that was selected to play and observe its reward. \n An Action can be an individual number (e.g., 2), a string (e.g. \n \"medium\"), or a list of some combination of numbers or strings\n (e.g., [\"action\", \"oscar\"]).\n reward: the reward received for taking the given action in the given context.\n \"\"\"\n ...\n\nclass LearnerFactory(Generic[_C_out, _A_out]):\n def __init__(self, ctor: Callable[...,Learner[_C_out,_A_out]], *args, **kwargs) -> None:\n self._ctor = ctor\n self._args = args\n self._kwargs = kwargs\n\n def create(self) -> Learner[_C_out,_A_out]:\n return self._ctor(*self._args, **self._kwargs)\n\nclass RandomLearner(Learner[Context, Action]):\n \"\"\"A Learner implementation that selects an action at random and learns nothing.\"\"\"\n\n def __init__(self, seed: Optional[int] = None):\n self._random = CobaRandom(seed)\n\n @property\n def family(self) -> str:\n \"\"\"The family of the learner.\n\n See the base class for more information\n \"\"\" \n return \"random\"\n\n @property\n def params(self) -> Dict[str, Any]:\n \"\"\"The parameters of the learner.\n \n See the base class for more information\n \"\"\"\n return { }\n\n def choose(self, key: Key, context: Context, actions: Sequence[Action]) -> Choice:\n \"\"\"Choose a random action from the action set.\n \n Args:\n key: The key identifying the interaction we are choosing for.\n context: The context we're currently in. See the base class for more information.\n actions: The actions to choose from. See the base class for more information.\n\n Returns:\n The index of the selected action. See the base class for more information.\n \"\"\"\n return self._random.randint(0, len(actions)-1)\n\n def learn(self, key: Key, context: Context, action: Action, reward: Reward) -> None:\n \"\"\"Learns nothing.\n\n Args:\n key: The key identifying the interaction this observed reward came from.\n context: The context we're learning about. See the base class for more information.\n action: The action that was selected in the context. See the base class for more information.\n reward: The reward that was gained from the action. See the base class for more information.\n \"\"\"\n pass\n \nclass EpsilonLearner(Learner[Context, Action]):\n \"\"\"A learner using epsilon-greedy searching while smoothing observations into a context/context-action lookup table.\n\n Remarks:\n This algorithm does not use any function approximation to attempt to generalize observed rewards.\n \"\"\"\n\n def __init__(self, epsilon: float, include_context: bool = False, seed: Optional[int] = None) -> None:\n \"\"\"Instantiate an EpsilonLearner.\n\n Args:\n epsilon: A value between 0 and 1. 
We explore with probability epsilon and exploit otherwise.\n            include_context: If true lookups are a function of context-action otherwise they are a function of action.\n            seed: An optional seed for the learner's internal random number generator.\n        \"\"\"\n\n        self._epsilon         = epsilon\n        self._include_context = include_context\n        self._random          = CobaRandom(seed)\n\n        self._N: Dict[Tuple[Context, Action], int            ] = defaultdict(int)\n        # unseen pairs default to None (rather than 0) so `choose` can distinguish \"never tried\" from \"observed reward of 0\"\n        self._Q: Dict[Tuple[Context, Action], Optional[float]] = defaultdict(lambda: None)\n\n    @property\n    def family(self) -> str:\n        \"\"\"The family of the learner.\n\n        See the base class for more information\n        \"\"\"\n        if self._include_context:\n            return \"cb_epsilongreedy\"\n        else:\n            return \"bandit_epsilongreedy\"\n\n    @property\n    def params(self) -> Dict[str, Any]:\n        \"\"\"The parameters of the learner.\n\n        See the base class for more information\n        \"\"\"\n        return {\"epsilon\": self._epsilon }\n\n    def choose(self, key: Key, context: Context, actions: Sequence[Action]) -> Choice:\n        \"\"\"Choose greedily with probability 1-epsilon. Choose randomly with probability epsilon.\n\n        Args:\n            key: The key identifying the interaction we are choosing for.\n            context: The context we're currently in. See the base class for more information.\n            actions: The actions to choose from. See the base class for more information.\n\n        Returns:\n            The index of the selected action. See the base class for more information.\n        \"\"\"\n        if self._random.random() <= self._epsilon: return self._random.randint(0,len(actions)-1)\n\n        keys        = [ self._key(context,action) for action in actions ]\n        values      = [ self._Q[key] for key in keys ]\n        max_value   = None if set(values) == {None} else max(v for v in values if v is not None)\n        max_indexes = [i for i in range(len(values)) if values[i]==max_value]\n\n        return self._random.choice(max_indexes)\n\n    def learn(self, key: Key, context: Context, action: Action, reward: Reward) -> None:\n        \"\"\"Smooth the observed reward into our current estimate of either E[R|S,A] or E[R|A].\n\n        Args:\n            key: The key identifying the interaction this observed reward came from.\n            context: The context we're learning about. See the base class for more information.\n            action: The action that was selected in the context. See the base class for more information.\n            reward: The reward that was gained from the action. See the base class for more information.\n        \"\"\"\n\n        sa_key = self._key(context,action)\n        alpha  = 1/(self._N[sa_key]+1)\n\n        old_Q = cast(float, 0 if self._Q[sa_key] is None else self._Q[sa_key])\n\n        # incremental mean: with alpha = 1/(n+1) this keeps the exact average of all n+1 observed rewards\n        self._Q[sa_key] = (1-alpha) * old_Q + alpha * reward\n        self._N[sa_key] = self._N[sa_key] + 1\n\n    def _key(self, context: Context, action: Action) -> Tuple[Context,Action]:\n        return (context, action) if self._include_context else (None, action)\n\nclass UcbTunedLearner(Learner[Context, Action]):\n    \"\"\"This is an implementation of Auer et al. (2002) UCB1-Tuned algorithm.\n\n    This algorithm assumes that the reward distribution has support in [0,1].\n\n    References:\n        Auer, Peter, Nicolo Cesa-Bianchi, and Paul Fischer. 
\"Finite-time analysis of \n the multiarmed bandit problem.\" Machine learning 47.2-3 (2002): 235-256.\n \"\"\"\n def __init__(self, seed: int = None):\n \"\"\"Instantiate a UcbTunedLearner.\"\"\"\n\n self._init_a: int = 0\n self._t : int = 0\n self._s : Dict[Action, int ] = defaultdict(int)\n self._m : Dict[Action, float ] = {}\n self._v : Dict[Action, OnlineVariance] = defaultdict(OnlineVariance)\n \n self._random = CobaRandom(seed)\n\n @property\n def family(self) -> str:\n \"\"\"The family of the learner.\n\n See the base class for more information\n \"\"\"\n return \"bandit_UCB\"\n\n @property\n def params(self) -> Dict[str, Any]:\n \"\"\"The parameters of the learner.\n \n See the base class for more information\n \"\"\"\n return { }\n\n def choose(self, key: Key, context: Context, actions: Sequence[Action]) -> Choice:\n \"\"\"Choose an action greedily by the upper confidence bound estimates.\n\n Args:\n key: The key identifying the interaction we are choosing for.\n context: The context we're currently in. See the base class for more information.\n actions: The actions to choose from. See the base class for more information.\n\n Returns:\n The index of the selected action. See the base class for more information.\n \"\"\"\n #we initialize by playing every action once\n if self._init_a < len(actions):\n self._init_a += 1\n return self._init_a-1\n\n else:\n values = [ self._m[a] + self._Avg_R_UCB(a) if a in self._m else None for a in actions ]\n max_value = None if set(values) == {None} else max(v for v in values if v is not None)\n max_indexes = [i for i in range(len(values)) if values[i]==max_value]\n return self._random.choice(max_indexes)\n\n def learn(self, key: Key, context: Context, action: Action, reward: Reward) -> None:\n \"\"\"Smooth the observed reward into our current estimate of E[R|A].\n\n Args:\n key: The key identifying the interaction this observed reward came from.\n context: The context we're learning about. See the base class for more information.\n action: The action that was selected in the context. See the base class for more information.\n reward: The reward that was gained from the action. 
See the base class for more information.\n        \"\"\"\n\n        if action not in self._m:\n            self._m[action] = reward\n        else:\n            # incremental mean; self._s holds the count of previous observations, so the new reward is weighted 1/(n+1)\n            self._m[action] = (1-1/(self._s[action]+1)) * self._m[action] + 1/(self._s[action]+1) * reward\n\n        self._t         += 1\n        self._s[action] += 1\n        self._v[action].update(reward)\n\n    def _Avg_R_UCB(self, action: Action) -> float:\n        \"\"\"Produce the estimated upper confidence bound (UCB) for E[R|A].\n\n        Args:\n            action: The action for which we want to retrieve UCB for E[R|A].\n\n        Returns:\n            The estimated UCB for E[R|A].\n\n        Remarks:\n            See the beginning of section 4 in the algorithm's paper for this equation.\n        \"\"\"\n        ln = math.log; n = self._t; n_j = self._s[action]; V_j = self._Var_R_UCB(action)\n\n        return math.sqrt(ln(n)/n_j * min(1/4,V_j))\n\n    def _Var_R_UCB(self, action: Action) -> float:\n        \"\"\"Produce the upper confidence bound (UCB) for Var[R|A].\n\n        Args:\n            action: The action for which we want to retrieve UCB for Var[R|A].\n\n        Returns:\n            The estimated UCB for Var[R|A].\n\n        Remarks:\n            See the beginning of section 4 in the algorithm's paper for this equation.\n        \"\"\"\n        ln = math.log; t = self._t; s = self._s[action]; var = self._v[action].variance\n\n        return var + math.sqrt(2*ln(t)/s) \n    \nclass VowpalLearner(Learner[Context, Action]):\n    \"\"\"A learner using Vowpal Wabbit's contextual bandit command line interface.\n\n    Remarks:\n        This learner requires that the Vowpal Wabbit package be installed. This package can be\n        installed via `pip install vowpalwabbit`. To learn more about solving contextual bandit\n        problems with Vowpal Wabbit see https://vowpalwabbit.org/tutorials/contextual_bandits.html\n        and https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-algorithms.\n    \"\"\"\n\n    @overload\n    def __init__(self, *, epsilon: float, is_adf: bool = True, seed: Optional[int] = None) -> None:\n        \"\"\"Instantiate a VowpalLearner.\n        Args:\n            epsilon: A value between 0 and 1. If provided exploration will follow epsilon-greedy.\n        \"\"\"\n        ...\n\n    @overload\n    def __init__(self, *, bag: int, is_adf: bool = True, seed: Optional[int] = None) -> None:\n        \"\"\"Instantiate a VowpalLearner.\n        Args:\n            bag: An integer value greater than 0. This value determines how many separate policies will be\n                learned. Each policy will be learned from bootstrap aggregation, making each policy unique. \n                For each choice one policy will be selected according to a uniform distribution and followed.\n        \"\"\"\n        ...\n\n    @overload\n    def __init__(self, *, cover: int, seed: Optional[int] = None) -> None:\n        \"\"\"Instantiate a VowpalLearner.\n        Args:\n            cover: An integer value greater than 0. This value determines how many separate policies will be\n                learned. These policies are learned in such a way to explicitly optimize policy diversity in order\n                to control exploration. For each choice one policy will be selected according to a uniform distribution\n                and followed. For more information on this algorithm see Agarwal et al. (2014).\n        References:\n            Agarwal, Alekh, Daniel Hsu, Satyen Kale, John Langford, Lihong Li, and Robert Schapire. \"Taming \n            the monster: A fast and simple algorithm for contextual bandits.\" In International Conference on \n            Machine Learning, pp. 1638-1646. 2014.\n        \"\"\"\n        ...\n\n    @overload\n    def __init__(self, *, softmax: float, seed: Optional[int] = None) -> None:\n        \"\"\"Instantiate a VowpalLearner.\n        Args:\n            softmax: An exploration parameter with 0 indicating uniform exploration is desired and infinity\n                indicating that no exploration is desired (aka, greedy action selection only). 
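Smaller values therefore explore more broadly while larger values approach greedy behavior. 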
For more info\n                see `lambda` at https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-algorithms.\n        \"\"\"\n        ...\n\n    @overload\n    def __init__(self,\n        learning: VW.cb_explore,\n        exploration: Union[VW.epsilongreedy, VW.bagging, VW.cover], *, seed: Optional[int] = None) -> None:\n        ...\n    \n    @overload\n    def __init__(self,\n        learning: VW.cb_explore_adf = VW.cb_explore_adf(),\n        exploration: Union[VW.epsilongreedy, VW.softmax, VW.bagging] = VW.epsilongreedy(0.025), \n        *, \n        seed: Optional[int] = None) -> None:\n        ...\n\n    def __init__(self, \n        learning: Union[VW.cb_explore,VW.cb_explore_adf] = VW.cb_explore_adf(),\n        exploration: Union[VW.epsilongreedy, VW.softmax, VW.bagging, VW.cover] = VW.epsilongreedy(0.025),\n        **kwargs) -> None:\n        \"\"\"Instantiate a VowpalLearner with the requested VW learner and exploration.\"\"\"\n\n        self._learning: Union[VW.cb_explore,VW.cb_explore_adf]\n        self._exploration: Union[VW.epsilongreedy, VW.softmax, VW.bagging, VW.cover]\n\n        if 'epsilon' in kwargs:\n            self._learning    = VW.cb_explore_adf() if kwargs.get('is_adf',True) else VW.cb_explore()\n            self._exploration = VW.epsilongreedy(kwargs['epsilon'])\n\n        elif 'softmax' in kwargs:\n            self._learning    = VW.cb_explore_adf()\n            self._exploration = VW.softmax(kwargs['softmax'])\n\n        elif 'bag' in kwargs:\n            self._learning    = VW.cb_explore_adf() if kwargs.get('is_adf',True) else VW.cb_explore()\n            self._exploration = VW.bagging(kwargs['bag'])\n        \n        elif 'cover' in kwargs:\n            self._learning    = VW.cb_explore()\n            self._exploration = VW.cover(kwargs['cover'])\n        \n        else:\n            self._learning    = learning\n            self._exploration = exploration\n\n        self._probs: Dict[Key, float] = {}\n        self._actions = self._new_actions(self._learning)\n\n        self._flags = kwargs.get('flags', '')\n\n        self._vw = VW.pyvw_Wrapper(self._learning.formatter, seed=kwargs.get('seed', None))\n\n    @property\n    def family(self) -> str:\n        \"\"\"The family of the learner.\n\n        See the base class for more information\n        \"\"\"\n        return f\"vw_{self._learning.__class__.__name__}_{self._exploration.__class__.__name__}\"\n    \n    @property\n    def params(self) -> Dict[str, Any]:\n        \"\"\"The parameters of the learner.\n\n        See the base class for more information\n        \"\"\" \n        return {**self._learning.params(), **self._exploration.params()} \n\n    def choose(self, key: Key, context: Context, actions: Sequence[Action]) -> Choice:\n        \"\"\"Choose an action according to the VowpalWabbit parameters passed into the constructor.\n\n        Args:\n            key: The key identifying the interaction we are choosing for.\n            context: The context we're currently in. See the base class for more information.\n            actions: The actions to choose from. See the base class for more information.\n\n        Returns:\n            The index of the selected action. See the base class for more information.\n        \"\"\"\n\n        if not self._vw.created:\n            self._vw.create(self._learning.flags(actions) + \" \" + self._exploration.flags() + \" \" + self._flags)\n\n        choice, prob = self._vw.choose(context, actions)\n\n        self._set_actions(key,actions)\n        self._probs[key] = prob\n        \n        # non-ADF cb_explore indexes into the fixed action set it was created with,\n        # so map its choice back to a position in the actions we were given\n        if isinstance(self._learning, VW.cb_explore):\n            return actions.index(self._actions[choice])\n        else:\n            return choice\n\n    def learn(self, key: Key, context: Context, action: Action, reward: Reward) -> None:\n        \"\"\"Learn from the observed reward for the given context action pair.\n\n        Args:\n            key: The key identifying the interaction this observed reward came from.\n            context: The context we're learning about. See the base class for more information.\n            action: The action that was selected in the context. 
See the base class for more information.\n            reward: The reward that was gained from the action. See the base class for more information.\n        \"\"\"\n\n        actions = self._get_actions(key)\n        prob    = self._probs[key] \n\n        self._vw.learn(prob, actions, context, action, reward)\n\n    def _new_actions(self, learning) -> Any:\n        # non-ADF learners use one fixed action list; ADF learners track actions per interaction key\n        if isinstance(learning, VW.cb_explore):\n            return []\n        else:\n            return {}\n\n    def _set_actions(self, key, actions) -> None:\n        if self._actions == []:\n            self._actions = actions\n\n        if isinstance(self._actions, collections.abc.MutableMapping):\n            self._actions[key] = actions\n\n    def _get_actions(self, key) -> Sequence[Action]:\n        if isinstance(self._actions, collections.abc.MutableMapping):\n            return self._actions.pop(key)\n        else:\n            return self._actions\n\nclass CorralLearner(Learner[Context, Action]):\n    \"\"\"An implementation of the Corral algorithm, which \"corrals\" a collection of base learners\n    and learns which of them to follow.\n\n    References:\n        Agarwal, Alekh, Haipeng Luo, Behnam Neyshabur, and Robert E. Schapire. \"Corralling a band\n        of bandit algorithms.\" In Conference on Learning Theory (COLT). 2017.\n    \"\"\"\n\n    def __init__(self, base_learners: Sequence[Learner[Context,Action]], eta: float, T: float = math.inf, seed: Optional[int] = None) -> None:\n        \n        self._base_learners = base_learners\n\n        M = len(self._base_learners)\n\n        self._M     = M\n        self._gamma = 1/T\n        self._beta  = 1/math.exp(1/math.log(T))\n\n        self._eta_init = eta\n        self._etas     = [ eta ] * M\n        self._rhos     = [ float(2*M) ] * M\n        self._ps       = [ 1/M ] * M\n        self._p_bars   = [ 1/M ] * M\n\n        self._random = CobaRandom(seed)\n        self._chosen_i: Dict[Key,int] = {}\n\n    @property\n    def family(self) -> str:\n        \"\"\"The family of the learner.\n\n        See the base class for more information\n        \"\"\"\n        return \"corral\"\n    \n    @property\n    def params(self) -> Dict[str, Any]:\n        \"\"\"The parameters of the learner.\n\n        See the base class for more information\n        \"\"\" \n        return {\"eta\": self._eta_init, \"B\": [ b.family for b in self._base_learners ] }\n\n    def choose(self, key: Key, context: _C_in, actions: Sequence[_A_in]) -> Choice:\n\n        thetas = [ base_algorithm.choose(key, context, actions) for base_algorithm in self._base_learners ]\n\n        # sample a base learner according to the smoothed distribution and play its choice\n        i = self._random.choice(range(self._M), self._p_bars)\n\n        self._chosen_i[key] = i\n\n        return thetas[i]\n\n    def learn(self, key: Key, context: _C_in, action: _A_in, reward: Reward) -> None:\n\n        loss = 1-reward # this assumes reward \in [0,1]\n        chosen_i = self._chosen_i.pop(key)\n\n        # importance-weighted feedback: only the sampled base learner receives a non-zero signal\n        rewards = [ reward/self._p_bars[chosen_i] * int(i == chosen_i) for i in range(self._M)]\n        losses  = [ loss  /self._p_bars[chosen_i] * int(i == chosen_i) for i in range(self._M)]\n\n        for learner, learner_reward in zip(self._base_learners, rewards):\n            learner.learn(key, context, action, learner_reward)\n\n        self._ps     = list(self._log_barrier_omd(losses))\n        self._p_bars = [ (1-self._gamma)*p + self._gamma*1/self._M for p in self._ps ]\n\n        for i in range(self._M):\n            if 1/self._p_bars[i] > self._rhos[i]:\n                self._rhos[i] = 2/self._p_bars[i]\n                self._etas[i] *= self._beta\n\n    def _log_barrier_omd(self, losses) -> Sequence[float]:\n        # solve for the normalization constant l in the log-barrier OMD update via Newton's method\n\n        f  = lambda l: float(sum( [ 1  /((1/p) + eta*(loss-l))    for p, eta, loss in zip(self._ps, self._etas, losses)]))\n        df = lambda l: float(sum( [ eta/((1/p) + eta*(loss-l))**2 for p, eta, loss in zip(self._ps, self._etas, losses)]))\n\n        min_loss = min(losses)\n        max_loss = max(losses)\n\n        if max_loss - 1 < .0001:\n            l = float(1)\n        elif max_loss > 2000:\n            l = float(0)\n        else:\n            l = (min_loss + max_loss)/2\n\n        while True:\n\n            if l < min_loss or l > max_loss:\n                l = min_loss + self._random.random() * (max_loss-min_loss)\n\n            if df(l) == 0:\n                # Newton's method stalls on a vanishing derivative; restart from a random point\n                l = min_loss + self._random.random() * (max_loss-min_loss)\n                continue\n\n            l = l - (f(l)-1)/df(l)\n\n            if round(f(l)-1,5) == 0:\n                break\n\n        return [ 
1/((1/p) + eta*(loss-l)) for p, eta, loss in zip(self._ps, self._etas, losses)]", "sub_path": "coba/learners.py", "file_name": "learners.py", "file_ext": "py", "file_size_in_byte": 25070, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "typing.TypeVar", "line_number": 20, "usage_type": "call"}, {"api_name": "coba.simulations.Context", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 21, "usage_type": "call"}, {"api_name": "coba.simulations.Action", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 23, "usage_type": "call"}, {"api_name": "coba.simulations.Context", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 24, "usage_type": "call"}, {"api_name": "coba.simulations.Action", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Generic", "line_number": 26, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 26, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 30, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 40, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 52, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 51, "usage_type": "name"}, {"api_name": "coba.simulations.Choice", "line_number": 52, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 78, "usage_type": "name"}, {"api_name": "coba.simulations.Reward", "line_number": 78, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Generic", "line_number": 100, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 101, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 109, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 112, "usage_type": "name"}, {"api_name": "coba.random.CobaRandom", "line_number": 113, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 124, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 131, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 131, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 131, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 131, "usage_type": "name"}, {"api_name": "coba.simulations.Choice", "line_number": 131, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 144, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 144, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 144, "usage_type": "name"}, {"api_name": "coba.simulations.Reward", "line_number": 144, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 155, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 155, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 162, "usage_type": "name"}, {"api_name": "coba.random.CobaRandom", "line_number": 173, 
"usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 175, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 175, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 175, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 175, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 175, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 176, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 176, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 176, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 176, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 176, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 176, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 190, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 190, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 197, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 197, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 197, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 197, "usage_type": "name"}, {"api_name": "coba.simulations.Choice", "line_number": 197, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 217, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 217, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 217, "usage_type": "name"}, {"api_name": "coba.simulations.Reward", "line_number": 217, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 230, "usage_type": "call"}, {"api_name": "coba.simulations.Context", "line_number": 235, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 235, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 235, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 238, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 238, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 252, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 252, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 252, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 253, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 253, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 254, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 254, "usage_type": "name"}, {"api_name": "coba.statistics.OnlineVariance", "line_number": 254, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 254, "usage_type": "call"}, {"api_name": "coba.random.CobaRandom", "line_number": 256, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 267, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 267, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 274, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 274, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 274, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 274, "usage_type": "name"}, {"api_name": "coba.simulations.Choice", "line_number": 
274, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 296, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 296, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 296, "usage_type": "name"}, {"api_name": "coba.simulations.Reward", "line_number": 296, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 315, "usage_type": "name"}, {"api_name": "math.log", "line_number": 327, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 329, "usage_type": "call"}, {"api_name": "coba.simulations.Action", "line_number": 331, "usage_type": "name"}, {"api_name": "math.log", "line_number": 343, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 345, "usage_type": "call"}, {"api_name": "coba.simulations.Context", "line_number": 347, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 347, "usage_type": "name"}, {"api_name": "typing.overload", "line_number": 357, "usage_type": "name"}, {"api_name": "typing.overload", "line_number": 365, "usage_type": "name"}, {"api_name": "typing.overload", "line_number": 375, "usage_type": "name"}, {"api_name": "typing.overload", "line_number": 390, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore", "line_number": 402, "usage_type": "attribute"}, {"api_name": "coba.vowpal", "line_number": 402, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 403, "usage_type": "name"}, {"api_name": "coba.vowpal.epsilongreedy", "line_number": 403, "usage_type": "attribute"}, {"api_name": "coba.vowpal", "line_number": 403, "usage_type": "name"}, {"api_name": "coba.vowpal.bagging", "line_number": 403, "usage_type": "attribute"}, {"api_name": "coba.vowpal.cover", "line_number": 403, "usage_type": "attribute"}, {"api_name": "typing.overload", "line_number": 400, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore_adf", "line_number": 408, "usage_type": "attribute"}, {"api_name": "coba.vowpal", "line_number": 408, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 409, "usage_type": "name"}, {"api_name": "coba.vowpal.epsilongreedy", "line_number": 409, "usage_type": "attribute"}, {"api_name": "coba.vowpal", "line_number": 409, "usage_type": "name"}, {"api_name": "coba.vowpal.softmax", "line_number": 409, "usage_type": "attribute"}, {"api_name": "coba.vowpal.bagging", "line_number": 409, "usage_type": "attribute"}, {"api_name": "typing.overload", "line_number": 406, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 415, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore", "line_number": 415, "usage_type": "attribute"}, {"api_name": "coba.vowpal", "line_number": 415, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore_adf", "line_number": 415, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 416, "usage_type": "name"}, {"api_name": "coba.vowpal.epsilongreedy", "line_number": 416, "usage_type": "attribute"}, {"api_name": "coba.vowpal", "line_number": 416, "usage_type": "name"}, {"api_name": "coba.vowpal.softmax", "line_number": 416, "usage_type": "attribute"}, {"api_name": "coba.vowpal.bagging", "line_number": 416, "usage_type": "attribute"}, {"api_name": "coba.vowpal.cover", "line_number": 416, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 420, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore", "line_number": 420, "usage_type": "attribute"}, {"api_name": "coba.vowpal", "line_number": 
420, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore_adf", "line_number": 420, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 421, "usage_type": "name"}, {"api_name": "coba.vowpal.epsilongreedy", "line_number": 421, "usage_type": "attribute"}, {"api_name": "coba.vowpal", "line_number": 421, "usage_type": "name"}, {"api_name": "coba.vowpal.softmax", "line_number": 421, "usage_type": "attribute"}, {"api_name": "coba.vowpal.bagging", "line_number": 421, "usage_type": "attribute"}, {"api_name": "coba.vowpal.cover", "line_number": 421, "usage_type": "attribute"}, {"api_name": "coba.vowpal.cb_explore_adf", "line_number": 424, "usage_type": "call"}, {"api_name": "coba.vowpal", "line_number": 424, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore", "line_number": 424, "usage_type": "call"}, {"api_name": "coba.vowpal.epsilongreedy", "line_number": 425, "usage_type": "call"}, {"api_name": "coba.vowpal", "line_number": 425, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore_adf", "line_number": 428, "usage_type": "call"}, {"api_name": "coba.vowpal", "line_number": 428, "usage_type": "name"}, {"api_name": "coba.vowpal.softmax", "line_number": 429, "usage_type": "call"}, {"api_name": "coba.vowpal", "line_number": 429, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore_adf", "line_number": 432, "usage_type": "call"}, {"api_name": "coba.vowpal", "line_number": 432, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore", "line_number": 432, "usage_type": "call"}, {"api_name": "coba.vowpal.bagging", "line_number": 433, "usage_type": "call"}, {"api_name": "coba.vowpal", "line_number": 433, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore", "line_number": 436, "usage_type": "call"}, {"api_name": "coba.vowpal", "line_number": 436, "usage_type": "name"}, {"api_name": "coba.vowpal.cover", "line_number": 437, "usage_type": "call"}, {"api_name": "coba.vowpal", "line_number": 437, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 443, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 443, "usage_type": "name"}, {"api_name": "coba.vowpal.pyvw_Wrapper", "line_number": 448, "usage_type": "call"}, {"api_name": "coba.vowpal", "line_number": 448, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 459, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 459, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 466, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 466, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 466, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 466, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore", "line_number": 487, "usage_type": "attribute"}, {"api_name": "coba.vowpal", "line_number": 487, "usage_type": "name"}, {"api_name": "coba.simulations.Choice", "line_number": 466, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 492, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 492, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 492, "usage_type": "name"}, {"api_name": "coba.simulations.Reward", "line_number": 492, "usage_type": "name"}, {"api_name": "coba.vowpal.cb_explore", "line_number": 508, "usage_type": "attribute"}, {"api_name": "coba.vowpal", "line_number": 508, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 507, "usage_type": 
"name"}, {"api_name": "collections.MutableMapping", "line_number": 517, "usage_type": "attribute"}, {"api_name": "collections.MutableMapping", "line_number": 521, "usage_type": "attribute"}, {"api_name": "typing.Sequence", "line_number": 520, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 520, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 526, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 526, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 528, "usage_type": "name"}, {"api_name": "coba.simulations.Context", "line_number": 528, "usage_type": "name"}, {"api_name": "coba.simulations.Action", "line_number": 528, "usage_type": "name"}, {"api_name": "math.inf", "line_number": 528, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 536, "usage_type": "call"}, {"api_name": "math.log", "line_number": 536, "usage_type": "call"}, {"api_name": "coba.random.CobaRandom", "line_number": 544, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 545, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 545, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 556, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 556, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 563, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 563, "usage_type": "name"}, {"api_name": "coba.simulations.Choice", "line_number": 563, "usage_type": "name"}, {"api_name": "coba.simulations.Key", "line_number": 573, "usage_type": "name"}, {"api_name": "coba.simulations.Reward", "line_number": 573, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 592, "usage_type": "name"}]} +{"seq_id": "489920155", "text": "from django.shortcuts import render,redirect,get_object_or_404,HttpResponse,Http404\r\nfrom django.http import JsonResponse\r\nfrom django.views.generic.base import View\r\nfrom django.urls import reverse\r\nfrom users.models import UserProfile,Plan,ExamTemplate,Exam,Question,Knowledge,ExamRecord\r\nfrom django import forms\r\nfrom .forms import EditorUserForm,AddUserForm,AddExamTemplateForm,EditorPlanForm,AddPlanForm,EditorExamTemplateForm,ChangeUserInfoForm,QuestionInfoForm,EditorQuestionForm\r\nfrom django.core.paginator import Paginator\r\nfrom . 
import settings\r\nfrom datetime import datetime\r\nimport json\r\nimport random\r\nimport xlwt\r\nfrom io import BytesIO\r\nfrom django.contrib.auth.hashers import make_password\r\n\r\nclass HomeView(View):\r\n    def get(self,request):\r\n\r\n        return render(request, 'home.html', {})\r\n\r\nclass StudentCheckGradeView(View):\r\n    def get(self,request):\r\n        content = {}\r\n        query_form = request.session.get('query_grade_form', {})\r\n        if query_form:\r\n            content.update(request.session.get('query_grade_form', {}))\r\n            del request.session['query_grade_form']\r\n            exam_records = ExamRecord.objects.all()\r\n            if query_form['subject'] !=\"all\":\r\n                exam_records = ExamRecord.objects.filter(exam__exam_template__subject=query_form['subject'])\r\n            if query_form['exam_th'] != \"all\":\r\n                exam_records = exam_records.filter(exam__exam_template__exam_th=query_form['exam_th'])\r\n        else:\r\n            exam_records = ExamRecord.objects.all()\r\n        if not exam_records.count(): # report an empty result instead of silently showing nothing\r\n            content['query_empty_error'] = f'该查询 {query_form} :结果为空'\r\n        content.update(get_model_list_data(request, exam_records)) # update() keeps any error message set above\r\n        content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n        content['all_exam_ths'] = []\r\n        content['last_query_form'] = query_form\r\n        for exam_record in exam_records:\r\n            content['all_exam_ths'].append(exam_record.exam.exam_template.exam_th)\r\n\r\n\r\n        return render(request, 'check_grade.html', content)\r\n    def post(self,request):\r\n        query = {}\r\n        query['subject'] = request.POST.get('subject', '')\r\n        query['exam_th'] = request.POST.get('exam_th', '')\r\n        request.session['query_grade_form'] = query\r\n        return redirect(reverse('check_grade'))\r\n\r\n\r\nclass StudentIndexView(View):\r\n    def get(self,request):\r\n        if request.is_ajax(): # ajax: list this student's exams for the selected subject\r\n            data = {}\r\n            subject = request.GET.get('subject', '')\r\n            exams = Exam.objects.filter(exam_template__subject=subject,allot__idcard=request.user.idcard)\r\n            data['exams'] = []\r\n            for exam in exams:\r\n                dic = {}\r\n                dic['pk'] = exam.pk\r\n                dic['exam_name'] = exam.exam_name\r\n                dic['major'] = exam.major\r\n                dic['start_time'] = exam.exam_template.exam_start_time\r\n                dic['end_time'] = exam.exam_template.exam_end_time\r\n                data['exams'].append(dic)\r\n            data['status'] = 'SUCCESS'\r\n            data['exam_num'] = len(exams)\r\n            return JsonResponse(data)\r\n        content = {}\r\n        content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n        return render(request,'student_index.html',content)\r\n    def post(self,request):\r\n        if request.is_ajax(): # ajax: fetch the description for the selected exam\r\n            data = {}\r\n            exam_id = request.POST.get('exam_id', '')\r\n            exam = Exam.objects.filter(pk=exam_id).first()\r\n            data['exam_describe'] = exam.exam_describe\r\n            data['exam_subject'] = exam.exam_template.subject\r\n            data['exam_start_time'] = exam.exam_template.exam_start_time\r\n            data['exam_id'] = exam.id\r\n            data['status'] = 'SUCCESS'\r\n            return JsonResponse(data)\r\n\r\n\r\nclass ExamingView(View):\r\n    def get(self,request):\r\n        content = {}\r\n        exam_id = request.GET.get('exam_id', '')\r\n        user_id = request.GET.get('user_id', '')\r\n        exam = Exam.objects.filter(pk=exam_id).first() # the exam currently being taken\r\n        dic = json.loads(exam.question_id_ls) # note: json.loads only accepts JSON strings (e.g. produced by json.dumps); passing str() of a dict raises an error\r\n\r\n\r\n        all_questions = {'single_choice_objs': [], 'mul_choice_objs': [], 'judge_objs': []}\r\n        for pk in dic['single_choice_id_ls']:\r\n            
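# resolve the stored question ids back to Question rows, one list per section of the paper\r\n            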
all_questions['single_choice_objs'].append(Question.objects.filter(pk=pk).first())\r\n        for pk in dic['mul_choice_id_ls']:\r\n            all_questions['mul_choice_objs'].append(Question.objects.filter(pk=pk).first())\r\n        for pk in dic['judge_id_ls']:\r\n            all_questions['judge_objs'].append(Question.objects.filter(pk=pk).first())\r\n\r\n        content.update(all_questions)\r\n        content['exam'] = exam\r\n        content['single_choice_num'] = len(all_questions['single_choice_objs'])\r\n        content['mul_choice_num'] = len(all_questions['mul_choice_objs'])\r\n        content['judge_num'] = len(all_questions['judge_objs'])\r\n        content['question_num'] = len(all_questions['single_choice_objs'])+len(all_questions['mul_choice_objs'])+len(all_questions['judge_objs'])\r\n        return render(request, 'examing.html', content)\r\n    def post(self,request):\r\n        if request.is_ajax():\r\n\r\n            examed_score = 0\r\n            exam_id = request.POST.get('exam_id','')\r\n            exam = Exam.objects.filter(pk=exam_id).first()\r\n\r\n            right_answer = []\r\n            user_answer = []\r\n            for k,v in request.POST.items():\r\n                if k.count('answer'): # answer fields carry the question id after the final underscore\r\n                    question_id = k.split('_')[-1]\r\n\r\n                    question = Question.objects.filter(pk=question_id).first()\r\n                    right_answer.append((k,question.answer))\r\n                    user_answer.append((question_id,v))\r\n                    if question.answer.lower().strip() == v.lower().strip(): # the submitted answer is correct\r\n                        if question.question_type == \"单选题\":\r\n                            score = exam.exam_template.plan.single_choice_score\r\n                        elif question.question_type == \"多选题\":\r\n                            score = exam.exam_template.plan.mul_choice_score\r\n                        else:\r\n                            score = exam.exam_template.plan.judge_score\r\n\r\n                        question.score = score\r\n                        question.save()\r\n                        examed_score += score\r\n            ExamRecord.objects.create(exam_id=exam_id,user_id=request.user.id,answer=str(user_answer),grade=examed_score)\r\n            data = {}\r\n            data['examed_score'] = examed_score\r\n            data['status'] = 'SUCCESS'\r\n            data['right_answer'] = right_answer\r\n            return JsonResponse(data)\r\n        return render(request, 'examing.html', {})\r\n\r\nclass QuestionStoreView(View): # question bank management\r\n    def get(self,request): # question list\r\n        if request.is_ajax(): # ajax: delete a question\r\n            question_id = request.GET.get('obj_unique', '')\r\n            question = Question.objects.filter(pk=question_id).first()\r\n            if question:\r\n                question.delete()\r\n                return SuccessResponse()\r\n            else:\r\n                return ErrorResponse('该问题不存在')\r\n        content = {}\r\n        content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n        content['all_knowledges'] = Knowledge.objects.all()\r\n        content['all_difficults'] = ('简单', '一般', '困难', '极难')\r\n\r\n        if request.session.get('add_question_store_content',''):\r\n            content.update(request.session.get('add_question_store_content'))\r\n            del request.session['add_question_store_content'] # remove after reading so the message shows only once\r\n        if request.session.get('editor_question_content',''):\r\n            content.update(request.session.get('editor_question_content'))\r\n            del request.session['editor_question_content'] # remove after reading so the message shows only once\r\n\r\n        questions = Question.objects.all()\r\n        content.update(get_model_list_data(request, questions))\r\n        return render(request,'question_store.html',content)\r\n    def post(self,request): # bulk import / edit\r\n        content = {}\r\n        file = request.FILES.get('question_store', None)\r\n        if file:\r\n            with file as f:\r\n                questions_str = f.read().decode('utf8') # the upload is bytes; decode it to a string\r\n                questions_str = questions_str.replace('\r\n','').replace('\'','')\r\n                questions_ls = json.loads(questions_str) # parse the JSON into Python objects\r\n            error_add_ls = []\r\n            for question in questions_ls: # iterate over each question dict\r\n                question_form = QuestionInfoForm(question)\r\n                if 
question_form.is_valid():\r\n                    try:\r\n                        knowledge_name = question['knowledge_name']\r\n\r\n                        question_name = question['question_name']\r\n                        if Question.objects.filter(question_name=question_name).exists():\r\n                            raise forms.ValidationError('该问题已存在')\r\n\r\n\r\n                        subject = question['subject']\r\n                        level = question['level']\r\n                        major = question['major']\r\n                        knowledge,iscreate = Knowledge.objects.get_or_create(knowledge_name=knowledge_name,\r\n                                                                             subject=subject,\r\n                                                                             level=level,\r\n                                                                             major=major)\r\n\r\n                        dic = {'question_name': question_name,\r\n                               'option': question['option'],\r\n                               'answer': question['answer'],\r\n                               'score': int(question['score']),\r\n                               'subject': subject,\r\n                               'question_type': question['question_type'],\r\n                               'difficult': question['difficult'],\r\n                               'knowledge': knowledge}\r\n\r\n                        question = Question.objects.create(**dic)\r\n                    except Exception as e:\r\n                        error_add_ls.append(str(e))\r\n                else:\r\n                    error = question_form.errors\r\n                    error_add_ls.append(error)\r\n            content['error_add_ls'] = error_add_ls if error_add_ls else ''\r\n\r\n            store_name = f'media/{request.user.username}_{file.name}'\r\n            with open(store_name, 'wb') as store: # keep a copy of the uploaded file\r\n                for chunk in file.chunks():\r\n                    store.write(chunk) # chunks are bytes, so the file must be opened in binary mode\r\n            request.session['add_question_store_content'] = content\r\n            return redirect(reverse('question_store'))\r\n        question_form = EditorQuestionForm(request.POST)\r\n        if question_form.is_valid():\r\n            old_question_name = question_form.data['old_question_name']\r\n            question = Question.objects.filter(question_name=old_question_name).first()\r\n\r\n            question.question_name = question_form.data['questionName']\r\n            question.answer = question_form.cleaned_data['answer']\r\n            question.score = int(question_form.cleaned_data['score'])\r\n            question.question_type = question_form.cleaned_data['question_type']\r\n            question.subject = question_form.cleaned_data['subject']\r\n            question.difficult = question_form.cleaned_data['difficult']\r\n\r\n            knowledge_name = question_form.cleaned_data['knowledge_name']\r\n            knowledge = Knowledge.objects.filter(knowledge_name=knowledge_name).first()\r\n            question.knowledge = knowledge\r\n\r\n            question.save()\r\n            content['success_change_msg'] = '修改成功'\r\n        else:\r\n            content['error_change_msg'] = '修改失败'\r\n        request.session['editor_question_content'] = content\r\n        return redirect(reverse('question_store'))\r\n\r\nclass QueryQuestionView(View): # query questions\r\n    def get(self,request):\r\n        content = {}\r\n        query_form = request.session.get('query_question_form','')\r\n        if query_form:\r\n            del request.session['query_question_form']\r\n            # look the criteria up only when a query was actually submitted\r\n            knowledge = Knowledge.objects.filter(knowledge_name=query_form['knowledge']).first()\r\n            questions = Question.objects.filter(subject=query_form['subject'],difficult=query_form['difficult'],knowledge=knowledge)\r\n            content.update(get_model_list_data(request, questions))\r\n            if not questions.count(): # the query returned nothing\r\n                content['query_empty_error'] = f'该查询 {query_form} :结果为空'\r\n        content['last_query_form'] = query_form\r\n        return render(request, 'question_store.html', content)\r\n    def post(self,request):\r\n        query = {}\r\n        query['subject'] = request.POST.get('subject','')\r\n        query['knowledge'] = request.POST.get('knowledge', '')\r\n        query['difficult'] = request.POST.get('difficult', '')\r\n        request.session['query_question_form'] = query\r\n        return redirect(reverse('question_query'))\r\n\r\nclass AddQuestionView(View):\r\n    def get(self,request):\r\n        content = {}\r\n        
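# the option lists below are hardcoded for the add-question form rather than loaded from configuration\r\n        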
content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n        content['all_question_types'] = ('单选题', '多选题', '判断题')\r\n        content['all_difficults'] = ('简单', '一般', '困难', '极难')\r\n        content['all_knowledges'] = Knowledge.objects.all()\r\n        if request.session.get('add_question_content',''):\r\n            content.update(request.session.get('add_question_content'))\r\n            del request.session['add_question_content'] # remove after reading so the message shows only once\r\n        return render(request,'teacher_index.html',content)\r\n    def post(self,request):\r\n        content = {}\r\n        subject = request.POST.get('subject','')\r\n        question_type = request.POST.get('question_type', '')\r\n        difficult = request.POST.get('difficult', '')\r\n        knowledge = request.POST.get('knowledge', '')\r\n        knowledge = Knowledge.objects.filter(knowledge_name=knowledge).first()\r\n\r\n        question_name = request.POST.get('question_name', '')\r\n        answer = request.POST.get('answer', '')\r\n        score = request.POST.get('score', '')\r\n        option_a = request.POST.get('option_a', '')\r\n        option_b = request.POST.get('option_b', '')\r\n        option_c = request.POST.get('option_c', '')\r\n        option_d = request.POST.get('option_d', '')\r\n        if question_type == \"判断题\": # true/false questions have no options\r\n            question = Question.objects.create(question_name=question_name,answer=answer,score=score,\r\n                                               subject=subject,question_type=question_type,\r\n                                               difficult=difficult,knowledge=knowledge)\r\n        else:\r\n            option = f\"A、{option_a}\\nB、{option_b}\\nC、{option_c}\\nD、{option_d}\"\r\n            question = Question.objects.create(question_name=question_name, answer=answer, score=score,option=option,\r\n                                               subject=subject, question_type=question_type,\r\n                                               difficult=difficult, knowledge=knowledge)\r\n        content['success_create_msg'] = '创建成功'\r\n        request.session['add_question_content'] = content\r\n        return redirect(reverse('add_question'))\r\n\r\nclass KnowledgeManageView(View):\r\n    def get(self,request): # knowledge point list\r\n        if request.is_ajax(): # ajax: delete a knowledge point\r\n            knowledge_id = request.GET.get('obj_unique', '')\r\n            knowledge = Knowledge.objects.filter(pk=knowledge_id).first()\r\n            if knowledge:\r\n                knowledge.delete()\r\n                return SuccessResponse()\r\n            else:\r\n                return ErrorResponse('该知识点不存在')\r\n\r\n        content = {}\r\n        content['all_levels'] = ('本科', '专科')\r\n        content['all_majors'] = ('软件工程', '法学', '数学')\r\n        content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n        if request.session.get('add_knowledge_content',''):\r\n            content.update(request.session.get('add_knowledge_content'))\r\n            del request.session['add_knowledge_content'] # remove after reading so the message shows only once\r\n        knowledges = Knowledge.objects.all()\r\n        content.update(get_model_list_data(request, knowledges))\r\n        return render(request,'knowledge_manage.html',content)\r\n    def post(self,request): # add a knowledge point\r\n        content = {}\r\n        knowledge_name = request.POST.get('knowledge_name','')\r\n        if Knowledge.objects.filter(knowledge_name=knowledge_name).count():\r\n            content['error_create_msg'] = '该知识点名重复'\r\n        else:\r\n            level = request.POST.get('level', '')\r\n            major = request.POST.get('major', '')\r\n            subject = request.POST.get('subject', '')\r\n            knowledge = Knowledge.objects.create(knowledge_name=knowledge_name,level=level,major=major,subject=subject)\r\n            content['success_create_msg'] = f'知识点:{knowledge_name} 创建成功'\r\n        request.session['add_knowledge_content'] = content\r\n        return redirect(reverse('knowledge_manage'))\r\n\r\nclass QueryKnowledgeView(View): # query knowledge points\r\n    def get(self,request):\r\n        content = {}\r\n        content['all_levels'] = ('本科', '专科')\r\n        content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n        content['all_majors'] = ('软件工程', '法学', '数学')\r\n\r\n        
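# the query criteria are stashed in the session by this view's POST handler below\r\n        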
query_form = request.session.get('query_knowledge_form', {}) # default to an empty dict so .get() below is always safe\r\n        if query_form:\r\n            del request.session['query_knowledge_form']\r\n        query_knowledge = Knowledge.objects.filter(level=query_form.get('level',''),major=query_form.get('major',''),subject=query_form.get('subject',''))\r\n\r\n\r\n        if not query_knowledge.count(): # a count of 0 means the query matched nothing\r\n            content['query_empty_error'] = f'该查询 {query_form} :结果为空'\r\n        else:\r\n            content.update(get_model_list_data(request, query_knowledge))\r\n\r\n        content['last_query_form'] = query_form\r\n        return render(request, 'knowledge_manage.html', content)\r\n    def post(self,request):\r\n        query = {}\r\n        query['level'] = request.POST.get('level', '')\r\n        query['major'] = request.POST.get('major', '')\r\n        query['subject'] = request.POST.get('subject', '')\r\n        request.session['query_knowledge_form'] = query\r\n        return redirect(reverse('knowledge_query'))\r\n\r\nclass PersonalInfo(View):\r\n    def get(self,request):\r\n        content = {}\r\n        if request.session.get('editor_user_content',''):\r\n            content.update(request.session.get('editor_user_content',''))\r\n            del request.session['editor_user_content']\r\n        if request.user.role == \"student\":\r\n            return render(request, 'student_personal_info.html', content)\r\n        elif request.user.role == \"teacher\":\r\n            return render(request, 'teacher_personal_info.html', content)\r\n        else:\r\n            return render(request, 'admin_personal_info.html', content)\r\n    def post(self,request):\r\n        content = {}\r\n        # update the avatar\r\n        if request.FILES.get('user_touxiang',''):\r\n            img = request.FILES.get('user_touxiang','')\r\n            request.user.touxiang = img\r\n            request.user.save()\r\n            return redirect(reverse('personal_info'))\r\n        # change the password\r\n        if request.is_ajax():\r\n            old_pwd = request.POST.get('old_pwd','')\r\n            new_pwd = request.POST.get('new_pwd', '')\r\n            again_pwd = request.POST.get('again_pwd', '') # assumes the confirmation input is named 'again_pwd'\r\n            error_msg = '原密码错误' # reported when the old password check fails\r\n            if request.user.check_password(old_pwd): # the old password is correct\r\n                if new_pwd!=again_pwd:\r\n                    error_msg = '两次密码不匹配'\r\n                elif len(new_pwd)>16 or len(new_pwd)<6:\r\n                    error_msg = '密码长度必须大于5小于17'\r\n                else:\r\n                    request.user.set_password(new_pwd) # hashes the plaintext automatically\r\n                    request.user.save() # saving a new password invalidates the current session, logging the user out\r\n                    return SuccessResponse()\r\n            return ErrorResponse(error_msg)\r\n\r\n        change_user_Form = ChangeUserInfoForm(request.POST)\r\n        if change_user_Form.is_valid():\r\n            old_username = request.POST.get('old_username','')\r\n            user = UserProfile.objects.filter(username=old_username).first()\r\n            user.username = change_user_Form.cleaned_data['username']\r\n            user.idcard = request.POST.get('idcard','')\r\n            user.save()\r\n            content['success_change_msg'] = f'用户->{old_username}:修改成功'\r\n        else:\r\n            content['error_change_msg'] = change_user_Form.errors.get('idcard','') + change_user_Form.errors.get('username','')\r\n        request.session['editor_user_content'] = content\r\n        return redirect(reverse('personal_info'))\r\n# paginate a model queryset for display\r\ndef get_model_list_data(request,model_all_list):\r\n    paginator = Paginator(model_all_list, settings.EACH_PAGE_USERS_NUM) # one page per EACH_PAGE_USERS_NUM objects\r\n    page_num = request.GET.get('page', 1) # page number from the query string, defaulting to 1\r\n    page_of_objs = paginator.get_page(page_num) # returns the Page for that number, falling back to a valid page when out of range\r\n    current_page_num = page_of_objs.number # current page number; show up to two pages on either side\r\n    page_range = list(range(max(current_page_num - 2, 1), current_page_num)) + list(\r\n        range(current_page_num, min(current_page_num + 2, paginator.num_pages) + 1))\r\n    # ellipsis markers for the gaps\r\n    if page_range[0] - 1 >= 2:\r\n        page_range.insert(0, 
'...')\r\n    if paginator.num_pages - page_range[-1] >= 2:\r\n        page_range.append('...')\r\n    # always include the first and last pages\r\n    if page_range[0] != 1:\r\n        page_range.insert(0, 1)\r\n    if page_range[-1] != paginator.num_pages:\r\n        page_range.append(paginator.num_pages)\r\n\r\n    content = {}\r\n    content['objs'] = page_of_objs.object_list # the objects on the current page\r\n    content['page_of_objs'] = page_of_objs\r\n    content['page_range'] = page_range\r\n    return content\r\n\r\n# JSON responses for successful and failed ajax requests\r\ndef SuccessResponse():\r\n    data = {}\r\n    data['status'] = 'SUCCESS'\r\n    return JsonResponse(data)\r\ndef ErrorResponse(error_message):\r\n    data = {}\r\n    data['status'] = 'ERROR'\r\n    data['ajax_delete_error_msg'] = error_message\r\n    return JsonResponse(data)\r\n\r\nclass AdminIndexView(View): # delete / list / edit users\r\n    def get(self,request): # delete or list users\r\n        if request.is_ajax(): # an ajax GET means delete\r\n            idcard = request.GET.get('obj_unique', '')\r\n            user = UserProfile.objects.filter(idcard=idcard).first()\r\n            if user:\r\n                user.delete()\r\n                return SuccessResponse()\r\n            else:\r\n                return ErrorResponse('该用户不存在')\r\n\r\n        users = UserProfile.objects.all()\r\n        content = get_model_list_data(request, users)\r\n        if request.session.get('editoe_user_content',''):\r\n            content.update(request.session.get('editoe_user_content'))\r\n            del request.session['editoe_user_content'] # remove after reading so the message shows only once\r\n\r\n        return render(request, 'admin_index.html', content)\r\n    def post(self,request): # edit a user\r\n        if request.is_ajax(): # an ajax POST toggles the review status\r\n            idcard = request.POST.get('obj_unique', '')\r\n            user = UserProfile.objects.filter(idcard=idcard).first()\r\n            if user:\r\n                user.is_checked = not user.is_checked\r\n                user.save()\r\n                return SuccessResponse()\r\n            else:\r\n                return ErrorResponse('该用户不存在')\r\n\r\n        content = {}\r\n        editor_user_form = EditorUserForm(request.POST)\r\n        if editor_user_form.is_valid():\r\n            old_username = editor_user_form.cleaned_data['old_username']\r\n            user = UserProfile.objects.filter(username=old_username).first()\r\n            if user:\r\n                user.username = editor_user_form.cleaned_data['username']\r\n                user.idcard = editor_user_form.cleaned_data['idcard']\r\n                user.level = editor_user_form.cleaned_data['level']\r\n                user.major = editor_user_form.cleaned_data['major']\r\n                user.role = editor_user_form.cleaned_data['role']\r\n                user.save()\r\n                content['success_change_msg'] = f'用户->{old_username}:修改成功'\r\n        else:\r\n            content['error_change_msg'] = editor_user_form.errors.get('idcard','') + editor_user_form.errors.get('username','')\r\n        request.session['editoe_user_content'] = content\r\n        return redirect(reverse('admin_index'))\r\nclass AdminQueryUserView(View): # query users\r\n    def get(self,request):\r\n        content = {}\r\n        query_form = request.session.get('query_user_form',{})\r\n        if query_form:\r\n            del request.session['query_user_form']\r\n        users = UserProfile.objects.filter(level=query_form.get('level',''), major=query_form.get('major',''))\r\n        if query_form.get('username',''):\r\n            users = users.filter(username__icontains=query_form['username'])\r\n        if query_form.get('idcard',''):\r\n            users = users.filter(idcard__icontains=query_form['idcard'])\r\n        content.update(get_model_list_data(request, users))\r\n        if not users.count(): # a count of 0 means the query matched nothing\r\n            content['query_empty_error'] = f'该查询 {query_form} :结果为空'\r\n        content['last_query_form'] = query_form\r\n        return render(request, 'admin_index.html', content)\r\n    def post(self,request):\r\n        query = {}\r\n        query['level'] = request.POST.get('level','')\r\n        query['major'] = 
request.POST.get('major', '')\r\n        query['username'] = request.POST.get('username', '')\r\n        query['idcard'] = request.POST.get('idcard', '')\r\n        request.session['query_user_form'] = query\r\n        return redirect(reverse('query_user'))\r\n\r\nclass AdminAddUserView(View): # add users\r\n    def get(self,request):\r\n        return render(request, 'add_user.html', {})\r\n\r\n    def post(self,request):\r\n        content = {}\r\n        file = request.FILES.get('user_file',None)\r\n        if file:\r\n            with file as f:\r\n                users_info = f.read().decode('gb2312') # the upload is bytes; decode it to a string\r\n                users_ls = users_info.split('\r\n') # one line per user\r\n            error_add_ls = []\r\n            for user_info in users_ls: # iterate over each user\r\n                if not user_info:\r\n                    continue\r\n                fields = user_info.split(' ') # the fields of a single user\r\n                fields = [field for field in fields if field] # drop empty strings left by repeated spaces\r\n                try:\r\n                    dic = {'username':fields[0],'password':fields[1],'idcard':fields[2],'level':fields[3],'major':fields[4],'role':fields[5]}\r\n                    add_form = AddUserForm(dic)\r\n                    if add_form.is_valid():\r\n                        user = UserProfile.objects.create_user(**add_form.cleaned_data)\r\n                except Exception as e:\r\n                    error_add_ls.append(str(e))\r\n            content['error_add_ls'] = error_add_ls\r\n\r\n            store_name = f'media/{request.user.username}_{file.name}'\r\n            with open(store_name,'wb') as store: # keep a copy of the uploaded file\r\n                for chunk in file.chunks():\r\n                    store.write(chunk) # chunks are bytes, so the file must be opened in binary mode\r\n\r\n        add_form = AddUserForm(request.POST)\r\n        if add_form.is_valid(): # the submitted data passed validation\r\n            username = add_form.cleaned_data['username']\r\n            idcard = add_form.cleaned_data['idcard']\r\n            password = add_form.cleaned_data['password'][3:]\r\n            level = add_form.cleaned_data['level']\r\n            major = add_form.cleaned_data['major']\r\n            role = add_form.cleaned_data['role']\r\n            # create_user builds the object and saves it to the database in one step\r\n            user = UserProfile.objects.create_user(username=username, idcard=idcard, password=password, level=level,\r\n                                                   major=major, role=role)\r\n            content['success_create_msg'] = f'用户:{username} 创建成功'\r\n        else:\r\n            content['idcard_error'] = add_form.errors.get('idcard','')\r\n            content['username_error'] = add_form.errors.get('username','')\r\n            content['last_post'] = request.POST\r\n\r\n        return render(request, 'add_user.html', content)\r\n\r\nclass AdminPlanManageView(View): # plan management\r\n    def get(self,request): # plan list\r\n        plans = Plan.objects.all()\r\n        content = get_model_list_data(request,plans)\r\n        if request.session.get('add_plan_content',''):\r\n            content.update(request.session.get('add_plan_content'))\r\n            del request.session['add_plan_content'] # remove after reading so the message shows only once\r\n        if request.session.get('editor_plan_content',''):\r\n            content.update(request.session.get('editor_plan_content'))\r\n            del request.session['editor_plan_content']\r\n        return render(request, 'plan_manage.html', content)\r\n    def post(self,request): # add a plan\r\n        content = {}\r\n        plan_form = AddPlanForm(request.POST)\r\n        if plan_form.is_valid():\r\n            plan_name = plan_form.cleaned_data['plan_name']\r\n            # each count/score falls back to 0 when the field is left blank\r\n            single_choice_num = int(request.POST.get('single_choice_num','')) if request.POST.get('single_choice_num','') else 0\r\n            single_choice_score = int(request.POST.get('single_choice_score','')) if request.POST.get('single_choice_score','') else 0\r\n            mul_choice_num = int(request.POST.get('mul_choice_num','')) if request.POST.get('mul_choice_num','') else 0\r\n            mul_choice_score = int(request.POST.get('mul_choice_score','')) if request.POST.get('mul_choice_score','') else 0\r\n            judge_num = int(request.POST.get('judge_num','')) if request.POST.get('judge_num','') else 0\r\n            
judge_score = int(request.POST.get('judge_score','')) if request.POST.get('judge_score','') else 0\r\n plan = Plan.objects.create(plan_name=plan_name,single_choice_num=single_choice_num,single_choice_score=single_choice_score,\r\n mul_choice_num=mul_choice_num,mul_choice_score=mul_choice_score,\r\n judge_num=judge_num,judge_score=judge_score\r\n )\r\n content['success_create_msg'] = f'方案:{plan_name} 创建成功'\r\n else:\r\n content['error_create_msg'] = plan_form.errors.get('plan_name','')\r\n request.session['add_plan_content'] = content\r\n return redirect(reverse('plan_manage'))\r\nclass AdminQueryPlanView(View): #query plans\r\n def get(self,request):\r\n content = {}\r\n query_form = request.session.get('query_plan_form','')\r\n if query_form:\r\n del request.session['query_plan_form']\r\n plans = Plan.objects.filter(plan_name__icontains=query_form['plan_name'])\r\n content.update(get_model_list_data(request, plans))\r\n if not plans.count(): #if the query result is empty\r\n content['query_empty_error'] = f'该查询 {query_form} :结果为空'\r\n content['last_query_form'] = query_form\r\n return render(request, 'plan_manage.html', content)\r\n def post(self,request):\r\n query = {}\r\n query['plan_name'] = request.POST.get('plan_name','')\r\n request.session['query_plan_form'] = query\r\n return redirect(reverse('query_plan'))\r\nclass AdminChangePlanView(View):\r\n def get(self,request): #ajax-delete a plan\r\n plan_name = request.GET.get('obj_unique', '')\r\n plan = Plan.objects.filter(plan_name=plan_name).first()\r\n if plan:\r\n plan.delete()\r\n return SuccessResponse()\r\n else:\r\n return ErrorResponse('该方案不存在')\r\n def post(self,request): #edit a plan\r\n content = {}\r\n plan_form = EditorPlanForm(request.POST)\r\n\r\n if plan_form.is_valid():\r\n plan_name = plan_form.cleaned_data['plan_name']\r\n old_plan_name = plan_form.cleaned_data['old_plan_name']\r\n plan = Plan.objects.filter(plan_name=old_plan_name).first()\r\n single_choice_num = int(request.POST.get('single_choice_num', '')) if request.POST.get('single_choice_num','') else 0 # defaults to 0 when left blank\r\n single_choice_score = int(request.POST.get('single_choice_score', '')) if request.POST.get(\r\n 'single_choice_score', '') else 0\r\n mul_choice_num = int(request.POST.get('mul_choice_num', '')) if request.POST.get('mul_choice_num',\r\n '') else 0\r\n mul_choice_score = int(request.POST.get('mul_choice_score', '')) if request.POST.get('mul_choice_score',\r\n '') else 0\r\n judge_num = int(request.POST.get('judge_num', '')) if request.POST.get('judge_num', '') else 0\r\n judge_score = int(request.POST.get('judge_score', '')) if request.POST.get('judge_score', '') else 0\r\n\r\n if plan:\r\n plan.plan_name = plan_name\r\n plan.single_choice_num = single_choice_num\r\n plan.single_choice_score = single_choice_score\r\n plan.mul_choice_num = mul_choice_num\r\n plan.mul_choice_score = mul_choice_score\r\n plan.judge_num = judge_num\r\n plan.judge_score = judge_score\r\n plan.save()\r\n content['success_change_msg'] = f'方案名->{plan_name}:修改成功'\r\n else:\r\n content['error_change_msg'] = plan_form.errors.get('plan_name', '')\r\n request.session['editor_plan_content'] = content\r\n return redirect(reverse('plan_manage'))\r\n\r\n\r\nclass AdminExamTemplateView(View): #exam templates\r\n def get(self,request):\r\n content = {}\r\n content['all_plans'] = Plan.objects.all()\r\n content['all_subjects'] = ('英语','语文','生物','化学')\r\n content['all_difficults'] = ('简单','一般', '困难','极难')\r\n exam_templates = ExamTemplate.objects.all()\r\n content['all_exem_templates'] = exam_templates\r\n 
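These query views all follow the same post/redirect/get shape: the POST stores the submitted filters in the session and redirects, and the GET reads them back out and deletes them. A stripped-down sketch of that pattern using stock Django (view, URL, and template names here are illustrative, not from this project):

```python
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic.base import View

class QueryView(View):
    def post(self, request):
        # Stash the filters and redirect so a browser refresh cannot re-submit.
        request.session['query_form'] = {'name': request.POST.get('name', '')}
        return redirect(reverse('query'))

    def get(self, request):
        # session.pop() reads and clears in one step, replacing the
        # get()/del pair used throughout this file.
        query_form = request.session.pop('query_form', {})
        return render(request, 'list.html', {'last_query_form': query_form})
```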
content.update(get_model_list_data(request, exam_templates))\r\n if request.session.get('editor_exam_template_content',''):\r\n content.update(request.session.get('editor_exam_template_content', {}))\r\n del request.session['editor_exam_template_content']\r\n if request.session.get('add_exam_template_content',''):\r\n content.update(request.session.get('add_exam_template_content',{}))\r\n del request.session['add_exam_template_content']\r\n return render(request, 'exam_template.html', content)\r\n def post(self,request): #add an exam template\r\n content = {}\r\n exam_template_form = AddExamTemplateForm(request.POST)\r\n if exam_template_form.is_valid():\r\n exam_template_name = exam_template_form.cleaned_data.get('exam_template_name','')\r\n exam_name = exam_template_form.cleaned_data.get('exam_name','')\r\n exam_th = int(request.POST.get('exam_th', '')) if request.POST.get('exam_th','') else 0\r\n subject = request.POST.get('subject', '')\r\n plan_name = request.POST.get('plan_name', '')\r\n plan = Plan.objects.filter(plan_name=plan_name).first() #the foreign key needs the related object itself at creation time\r\n difficult = request.POST.get('difficult', '')\r\n exam_start_time = request.POST.get('exam_start_time', '') #str like 2019-01-01T08:23; that is the format this field arrives in\r\n exam_end_time = request.POST.get('exam_end_time', '')\r\n exam_start_time = datetime.now().strptime(exam_start_time, \"%Y-%m-%dT%H:%M\") #parse the time string into a datetime\r\n exam_end_time = datetime.now().strptime(exam_end_time, \"%Y-%m-%dT%H:%M\")\r\n try:\r\n exam_template = ExamTemplate.objects.create(exam_template_name=exam_template_name,exam_name=exam_name,exam_th=exam_th,\r\n subject=subject,plan=plan,difficult=difficult,\r\n exam_start_time=exam_start_time,exam_end_time=exam_end_time)\r\n content['success_create_msg'] = f'试卷模板:{exam_template_name} 创建成功'\r\n except Exception as e:\r\n print(e)\r\n else:\r\n content['error_create_msg'] = exam_template_form.errors.get('exam_template_name', '') + exam_template_form.errors.get('exam_name', '')\r\n request.session['add_exam_template_content'] = content\r\n return redirect(reverse('exam_template'))\r\nclass AdminChangeExamTemplateView(View):\r\n def get(self,request): #ajax-delete an exam template\r\n exam_template_name = request.GET.get('obj_unique', '')\r\n exam_template = ExamTemplate.objects.filter(exam_template_name=exam_template_name).first()\r\n if exam_template:\r\n exam_template.delete()\r\n return SuccessResponse()\r\n else:\r\n return ErrorResponse('该模板不存在')\r\n def post(self,request): #edit an exam template\r\n content = {}\r\n exam_template_form = EditorExamTemplateForm(request.POST)\r\n if exam_template_form.is_valid():\r\n old_exam_template_name = exam_template_form.cleaned_data['old_exam_template_name']\r\n exam_template_name = exam_template_form.cleaned_data['exam_template_name']\r\n old_exam_name = exam_template_form.cleaned_data['old_exam_name']\r\n exam_name = exam_template_form.cleaned_data['exam_name']\r\n exam_template = ExamTemplate.objects.filter(exam_template_name=old_exam_template_name).first()\r\n\r\n exam_template.exam_template_name = exam_template_name\r\n exam_template.exam_name = exam_name\r\n exam_template.exam_th = int(request.POST.get('exam_th', '')) if request.POST.get('exam_th', '') else 0\r\n exam_template.subject = request.POST.get('subject', '')\r\n plan_name = request.POST.get('plan_name', '')\r\n exam_template.plan = Plan.objects.filter(plan_name=plan_name).first() # the foreign key needs the related object itself\r\n exam_template.difficult = request.POST.get('difficult', '')\r\n exam_start_time = request.POST.get('exam_start_time', '') # str like 2019-01-01T08:23; that is the format this field arrives in\r\n 
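A side note on the `datetime.now().strptime(...)` calls in these two methods: `strptime` is effectively a class-level constructor, so routing it through `now()` builds a throwaway "current time" value whose only role is to reach the method. The direct form is equivalent and clearer:

```python
from datetime import datetime

# Equivalent to datetime.now().strptime(...), without the throwaway now() value.
exam_start_time = datetime.strptime('2019-01-01T08:23', '%Y-%m-%dT%H:%M')
```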
exam_end_time = request.POST.get('exam_end_time', '')\r\n exam_template.exam_start_time = datetime.now().strptime(exam_start_time, \"%Y-%m-%dT%H:%M\") # parse the time string into a datetime\r\n exam_template.exam_end_time = datetime.now().strptime(exam_end_time, \"%Y-%m-%dT%H:%M\")\r\n exam_template.save()\r\n content['success_change_msg'] = f'试卷模板名->{exam_template_name}:修改成功'\r\n else:\r\n content['error_change_msg'] = exam_template_form.errors.get('exam_template_name', '') + exam_template_form.errors.get('exam_name', '')\r\n request.session['editor_exam_template_content'] = content\r\n return redirect(reverse('exam_template'))\r\nclass AdminQueryExamTemplateView(View): #query exam templates\r\n def get(self,request):\r\n content = {}\r\n content['all_plans'] = Plan.objects.all()\r\n content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n content['all_difficults'] = ('简单', '一般', '困难', '极难')\r\n exam_templates = ExamTemplate.objects.all()\r\n query_form = request.session.get('query_exam_template_form','')\r\n\r\n plan_name = query_form['plan_name']\r\n plan = get_object_or_404(Plan,plan_name=plan_name)\r\n exam_templates = ExamTemplate.objects.filter(subject=query_form['subject'],plan=plan, difficult=query_form['difficult'])\r\n if query_form['exam_template_name']:\r\n exam_templates = exam_templates.filter(exam_template_name=query_form['exam_template_name'])\r\n if query_form['exam_name']:\r\n exam_templates = exam_templates.filter(exam_name=query_form['exam_name'])\r\n if query_form['exam_th']:\r\n exam_templates = exam_templates.filter(exam_th=query_form['exam_th'])\r\n content.update(get_model_list_data(request, exam_templates))\r\n if not exam_templates.count(): # when count() returns 0, report the empty-result message\r\n content['query_empty_error'] = f'该查询 {query_form} :结果为空'\r\n content['last_query_form'] = query_form\r\n return render(request, 'exam_template.html', content)\r\n def post(self,request):\r\n query = {}\r\n query['exam_template_name'] = request.POST.get('exam_template_name', '')\r\n query['exam_name'] = request.POST.get('exam_name', '')\r\n query['exam_th'] = int(request.POST.get('exam_th', '')) if request.POST.get('exam_th', '') else 0\r\n query['subject'] = request.POST.get('subject', '')\r\n query['plan_name'] = request.POST.get('plan_name', '')\r\n query['difficult'] = request.POST.get('difficult', '')\r\n request.session['query_exam_template_form'] = query\r\n return redirect(reverse('query_exam_template'))\r\n\r\nclass AdminExamCreateView(View):\r\n def get(self,request): #exam list\r\n content = {}\r\n content['all_levels'] = ('本科','专科')\r\n content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n content['all_majors'] = ('软件工程','法学','数学')\r\n content['all_templates'] = ExamTemplate.objects.all()\r\n exams = Exam.objects.all()\r\n content.update(get_model_list_data(request, exams))\r\n if request.session.get('random_add_exam_content',''):\r\n content.update(request.session.get('random_add_exam_content', {}))\r\n del request.session['random_add_exam_content']\r\n return render(request, 'exam_create.html', content)\r\n def post(self,request): # randomly generate exams from a template\r\n content = {}\r\n exam_name = request.POST.get('exam_name', '')\r\n exam_template_name = request.POST.get('exam_template_name','')\r\n level = request.POST.get('level','')\r\n major = request.POST.get('major','')\r\n exam_num = int(request.POST.get('exam_num', '')) if request.POST.get('exam_num', '') else 0\r\n\r\n exam_obj_ls = []\r\n if exam_num>0:\r\n exam_template = ExamTemplate.objects.filter(exam_template_name=exam_template_name).first()\r\n subject = exam_template.subject\r\n 
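The query views above narrow a base queryset one optional field at a time, filtering only when the form field was actually filled in. That idiom can be written once generically; a sketch using nothing beyond stock Django (the `narrow` helper is hypothetical):

```python
# Hypothetical helper: apply only the filters the user actually filled in.
def narrow(queryset, **maybe_filters):
    for lookup, value in maybe_filters.items():
        if value:  # skip blank form fields, matching the behaviour in this file
            queryset = queryset.filter(**{lookup: value})
    return queryset

# e.g. narrow(ExamTemplate.objects.all(),
#             exam_template_name__icontains=query_form.get('exam_template_name', ''),
#             exam_th=query_form.get('exam_th', 0))
```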
difficult = exam_template.difficult\r\n single_choice_num = exam_template.plan.single_choice_num\r\n mul_choice_num = exam_template.plan.mul_choice_num\r\n judge_num = exam_template.plan.judge_num\r\n\r\n knowledges = Knowledge.objects.filter(level=level, major=major, subject=subject)\r\n question_objs = Question.objects.none()\r\n for knowledge in knowledges:\r\n questions = knowledge.question_set.all().filter(difficult=difficult) #from all questions linked to this knowledge point, keep those matching the difficulty\r\n question_objs = question_objs|questions #merge the querysets\r\n single_choice_questions_qset = question_objs.filter(question_type='单选题')\r\n mul_choice_questions_qset = question_objs.filter(question_type='多选题')\r\n judge_questions_qset = question_objs.filter(question_type='判断题')\r\n\r\n for _ in range(exam_num):\r\n single_choice_id_ls = []\r\n mul_choice_id_ls = []\r\n judge_id_ls = []\r\n for _ in range(single_choice_num):\r\n random_index = random.randint(0, len(single_choice_questions_qset) - 1)\r\n single_choice_id_ls.append(single_choice_questions_qset[random_index].id)\r\n for _ in range(mul_choice_num):\r\n random_index = random.randint(0, len(mul_choice_questions_qset) - 1)\r\n mul_choice_id_ls.append(mul_choice_questions_qset[random_index].id)\r\n for _ in range(judge_num):\r\n random_index = random.randint(0, len(judge_questions_qset) - 1)\r\n judge_id_ls.append(judge_questions_qset[random_index].id)\r\n exam_dict = {'single_choice_id_ls': single_choice_id_ls, 'mul_choice_id_ls': mul_choice_id_ls,\r\n 'judge_id_ls': judge_id_ls}\r\n exam_dict = json.dumps(exam_dict)\r\n exam = Exam(exam_template=exam_template,major=major,level=level,exam_name=exam_name,question_id_ls = exam_dict)\r\n exam_obj_ls.append(exam)\r\n try:\r\n Exam.objects.bulk_create(exam_obj_ls)\r\n content['success_create_msg'] = f'成功随机生成{exam_num}张试卷'\r\n except Exception as e:\r\n print(e)\r\n else:\r\n content['error_create_msg'] = '随机生成失败'\r\n request.session['random_add_exam_content'] = content\r\n return redirect(reverse('exam_create'))\r\nclass AdminQueryExamView(View): # query exams\r\n def get(self,request):\r\n content = {}\r\n content['all_levels'] = ('本科', '专科')\r\n content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n content['all_majors'] = ('软件工程', '法学', '数学')\r\n content['all_templates'] = ExamTemplate.objects.all()\r\n exams = Exam.objects.all()\r\n query_form = request.session.get('query_exam_form', '')\r\n exam_template = ExamTemplate.objects.filter(exam_template_name__icontains=query_form.get('exam_template','')).first()\r\n if exam_template:\r\n queried_exams_1 = exam_template.exam_set.all()\r\n queried_exams = queried_exams_1.filter(level__icontains=query_form.get('level',''),\r\n # subject__icontains=query_form.get('subject',''),\r\n major__icontains=query_form.get('major', '')\r\n )\r\n content.update(get_model_list_data(request, queried_exams))\r\n if not queried_exams.count(): # when count() returns 0, report the empty-result message\r\n content['query_empty_error'] = f'该查询 {query_form} :结果为空'\r\n else:\r\n content['query_empty_error'] = f'该查询 {query_form} :结果为空'\r\n content['last_query_form'] = query_form\r\n return render(request, 'exam_create.html', content)\r\n def post(self,request):\r\n query = {}\r\n query['level'] = request.POST.get('level', '')\r\n query['subject'] = request.POST.get('subject', '')\r\n query['major'] = request.POST.get('major', '')\r\n query['exam_template'] = request.POST.get('exam_template', '')\r\n request.session['query_exam_form'] = query\r\n return redirect(reverse('query_exam'))\r\nclass AdminChangeExamView(View):\r\n def get(self, 
request): # ajax-delete an exam\r\n exam_id = request.GET.get('obj_unique', '')\r\n exam = Exam.objects.filter(pk=exam_id).first()\r\n if exam:\r\n exam.delete()\r\n return SuccessResponse()\r\n else:\r\n return ErrorResponse('该试卷不存在')\r\n\r\n def post(self, request):\r\n content = {}\r\n exam_template_form = EditorExamTemplateForm(request.POST)\r\n if exam_template_form.is_valid():\r\n old_exam_template_name = exam_template_form.cleaned_data['old_exam_template_name']\r\n exam_template_name = exam_template_form.cleaned_data['exam_template_name']\r\n old_exam_name = exam_template_form.cleaned_data['old_exam_name']\r\n exam_name = exam_template_form.cleaned_data['exam_name']\r\n exam_template = ExamTemplate.objects.filter(exam_template_name=old_exam_template_name).first()\r\n\r\n exam_template.exam_template_name = exam_template_name\r\n exam_template.exam_name = exam_name\r\n exam_template.exam_th = int(request.POST.get('exam_th', '')) if request.POST.get('exam_th', '') else 0\r\n exam_template.subject = request.POST.get('subject', '')\r\n plan_name = request.POST.get('plan_name', '')\r\n exam_template.plan = Plan.objects.filter(plan_name=plan_name).first() # the foreign key needs the related object itself\r\n exam_template.difficult = request.POST.get('difficult', '')\r\n exam_start_time = request.POST.get('exam_start_time', '') # str like 2019-01-01T08:23; that is the format this field arrives in\r\n exam_end_time = request.POST.get('exam_end_time', '')\r\n exam_template.exam_start_time = datetime.now().strptime(exam_start_time, \"%Y-%m-%dT%H:%M\") # parse the time string into a datetime\r\n exam_template.exam_end_time = datetime.now().strptime(exam_end_time, \"%Y-%m-%dT%H:%M\")\r\n exam_template.save()\r\n content['success_change_msg'] = f'试卷模板名->{exam_template_name}:修改成功'\r\n else:\r\n content['error_change_msg'] = exam_template_form.errors.get('exam_template_name',\r\n '') + exam_template_form.errors.get('exam_name',\r\n '')\r\n request.session['editor_exam_template_content'] = content\r\n return redirect(reverse('exam_template'))\r\n\r\n\r\nclass ExamAllotView(View):\r\n def get(self,request): #exam list\r\n content = {}\r\n content['all_levels'] = ('本科','专科')\r\n content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n content['all_majors'] = ('软件工程','法学','数学')\r\n content['all_templates'] = ExamTemplate.objects.all()\r\n exams = Exam.objects.all()\r\n content.update(get_model_list_data(request, exams))\r\n return render(request, 'exam_allot.html', content)\r\n def post(self,request): # ajax: look up candidate users for assignment\r\n if request.is_ajax():\r\n data = {}\r\n exam_id = request.POST.get('exam_id', '')\r\n level = request.POST.get('level', '')\r\n major = request.POST.get('major', '')\r\n username = request.POST.get('username', '')\r\n idcard = request.POST.get('idcard', '')\r\n exam = Exam.objects.filter(pk=exam_id).first()\r\n users = UserProfile.objects.filter(level=level,major=major,username__icontains=username,idcard__icontains=idcard)\r\n\r\n data['users_ls'] = []\r\n for user in users:\r\n dic = {}\r\n dic['username'] = user.username\r\n dic['idcard'] = user.idcard\r\n dic['level'] = user.level\r\n dic['major'] = user.major\r\n dic['user_id'] = user.id\r\n data['users_ls'].append(dic)\r\n data['status'] = 'SUCCESS'\r\n data['user_num'] = len(users)\r\n return JsonResponse(data)\r\n\r\n else:\r\n user_id_ls = request.POST.getlist('item') #list of all checked values from the multi-select checkboxes\r\n exam_id = request.POST.get('exam_id','')\r\n exam = Exam.objects.filter(pk=exam_id).first()\r\n print(dir(exam.allot))\r\n for pk in user_id_ls:\r\n user = UserProfile.objects.filter(pk=pk).first()\r\n\r\n exam.allot.add(user)\r\n exam.save()\r\n return 
redirect(reverse('exam_allot'))\r\n\r\nclass QueryExamAllotView(View): # query exam allotments\r\n def get(self,request):\r\n content = {}\r\n content['all_levels'] = ('本科', '专科')\r\n content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n content['all_majors'] = ('软件工程', '法学', '数学')\r\n content['all_templates'] = ExamTemplate.objects.all()\r\n query_form = request.session.get('query_exam_allot_form', {})\r\n exams = Exam.objects.filter(major=query_form.get('major', ''),level=query_form.get('level', ''))\r\n exam_objs = []\r\n for exam in exams:\r\n if exam.exam_template.subject.count(query_form.get('subject','')):\r\n if exam.exam_template.exam_template_name.count(query_form.get('exam_template','')):\r\n exam_objs.append(exam)\r\n\r\n\r\n\r\n content.update(get_model_list_data(request, exam_objs))\r\n if not exam_objs: # if the filtered list is empty, report the empty-result message\r\n content['query_empty_error'] = f'该查询 {query_form} :结果为空'\r\n content['last_query_form'] = query_form\r\n return render(request, 'exam_allot.html', content)\r\n def post(self,request):\r\n query = {}\r\n query['level'] = request.POST.get('level', '')\r\n query['subject'] = request.POST.get('subject', '')\r\n query['major'] = request.POST.get('major', '')\r\n query['exam_template'] = request.POST.get('exam_template', '')\r\n request.session['query_exam_allot_form'] = query\r\n return redirect(reverse('query_exam_allot'))\r\n\r\n\r\nclass ExamRecordView(View):\r\n def get(self,request): #exam record list\r\n if request.is_ajax(): #delete\r\n exam_record_id = request.GET.get('obj_unique', '')\r\n exam_record = ExamRecord.objects.filter(pk=exam_record_id).first()\r\n\r\n if exam_record:\r\n exam_record.delete()\r\n return SuccessResponse()\r\n else:\r\n return ErrorResponse('该考试记录不存在')\r\n content = {}\r\n content['all_levels'] = ('本科','专科')\r\n content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n content['all_majors'] = ('软件工程','法学','数学')\r\n exam_records = ExamRecord.objects.all()\r\n content.update(get_model_list_data(request, exam_records))\r\n if request.session.get('change_exam_record_content',''):\r\n content.update(request.session.get('change_exam_record_content', {}))\r\n del request.session['change_exam_record_content']\r\n return render(request, 'exam_record.html', content)\r\n def post(self,request):\r\n exam_record_id = request.POST.get('exam_record_id','')\r\n exam_record = ExamRecord.objects.filter(pk=exam_record_id).first()\r\n exam_record.answer = request.POST.get('answer','')\r\n exam_record.grade = request.POST.get('grade', '')\r\n exam_record.save()\r\n content = {}\r\n content['success_change_msg'] = f'考试记录:修改成功'\r\n request.session['change_exam_record_content'] = content\r\n return redirect(reverse('exam_record'))\r\n\r\nclass QueryExamRecordView(View): # query exam records\r\n def get(self,request):\r\n content = {}\r\n content['all_levels'] = ('本科', '专科')\r\n content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n content['all_majors'] = ('软件工程', '法学', '数学')\r\n\r\n query_form = request.session.get('query_exam_record_form', {})\r\n exam_records = ExamRecord.objects.filter(user__level=query_form.get('level',''),\r\n user__major=query_form.get('major',''),\r\n exam__exam_template__subject=query_form.get('subject',''),\r\n user__username__icontains=query_form.get('username',''),\r\n user__idcard__icontains=query_form.get('idcard',''))\r\n\r\n\r\n content.update(get_model_list_data(request, exam_records))\r\n if not exam_records: # if the queryset is empty, report the empty-result message\r\n content['query_empty_error'] = f'该查询 {query_form} :结果为空'\r\n content['last_query_form'] = query_form\r\n 
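The double-underscore lookups above (`user__level`, `exam__exam_template__subject`) traverse foreign keys inside a single query. When the matching rows are then rendered field by field, adding `select_related` avoids one extra query per row; a sketch assuming this file's models are importable, with illustrative filter values:

```python
# Same style of cross-relation filter, with the related rows fetched in one JOIN.
records = (ExamRecord.objects
           .filter(user__level='本科',
                   exam__exam_template__subject='英语')
           .select_related('user', 'exam__exam_template'))
```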
return render(request, 'exam_record.html', content)\r\n def post(self,request):\r\n query = {}\r\n query['level'] = request.POST.get('level', '')\r\n query['subject'] = request.POST.get('subject', '')\r\n query['major'] = request.POST.get('major', '')\r\n query['username'] = request.POST.get('username', '')\r\n query['idcard'] = request.POST.get('idcard', '')\r\n request.session['query_exam_record_form'] = query\r\n return redirect(reverse('query_exam_record'))\r\n\r\n\r\nclass ExamGradeView(View):\r\n def get(self,request):\r\n if request.is_ajax(): #delete\r\n exam_record_id = request.GET.get('obj_unique', '')\r\n exam_record = ExamRecord.objects.filter(pk=exam_record_id).first()\r\n\r\n if exam_record:\r\n exam_record.delete()\r\n return SuccessResponse()\r\n else:\r\n return ErrorResponse('该考试成绩不存在')\r\n content = {}\r\n content['all_levels'] = ('本科','专科')\r\n content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n content['all_majors'] = ('软件工程','法学','数学')\r\n exam_records = ExamRecord.objects.all()\r\n content.update(get_model_list_data(request, exam_records))\r\n if request.session.get('change_exam_grade_content',''):\r\n content.update(request.session.get('change_exam_grade_content', {}))\r\n del request.session['change_exam_grade_content']\r\n return render(request, 'exam_grade.html', content)\r\n def post(self,request): # ajax\r\n if request.is_ajax():\r\n data = {}\r\n level = request.POST.get('level', '')\r\n major = request.POST.get('major', '')\r\n subject = request.POST.get('subject', '')\r\n username = request.POST.get('username', '')\r\n idcard = request.POST.get('idcard', '')\r\n exam_records = ExamRecord.objects.filter(user__level=level,user__major=major,user__username__icontains=username,\r\n user__idcard__icontains=idcard,exam__exam_template__subject=subject)\r\n\r\n data['exam_record_ls'] = []\r\n for exam_record in exam_records:\r\n dic = {}\r\n dic['username'] = exam_record.user.username\r\n dic['idcard'] = exam_record.user.idcard\r\n dic['level'] = exam_record.user.level\r\n dic['major'] = exam_record.user.major\r\n dic['subject'] = exam_record.exam.exam_template.subject\r\n dic['exam_record_id'] = exam_record.id\r\n dic['grade'] = exam_record.grade\r\n data['exam_record_ls'].append(dic)\r\n data['status'] = 'SUCCESS'\r\n data['exam_record_num'] = len(exam_records)\r\n return JsonResponse(data)\r\n else:\r\n exam_record_id = request.POST.get('exam_record_id', '')\r\n exam_record = ExamRecord.objects.filter(pk=exam_record_id).first()\r\n exam_record.grade = request.POST.get('grade', '')\r\n exam_record.save()\r\n content = {}\r\n content['success_change_msg'] = f'考试成绩:修改成功'\r\n request.session['change_exam_grade_content'] = content\r\n return redirect(reverse('exam_grade'))\r\n\r\nclass ExportView(View):\r\n def post(self,request):\r\n content = {}\r\n exam_record_id_ls = request.POST.getlist('item') # list of all checked values from the multi-select checkboxes\r\n if exam_record_id_ls:\r\n response = HttpResponse(content_type='application/vnd.ms-excel')\r\n response['Content-Disposition'] = 'attachment;filename=student_grade.xls' #unclear why, but a Chinese filename fails here (becomes a bare download) while an English one works\r\n # create a workbook object\r\n wb = xlwt.Workbook(encoding='utf8')\r\n # create a sheet object\r\n sheet = wb.add_sheet('学生成绩表')\r\n # write the header row\r\n sheet.write(0, 0, '姓名')\r\n sheet.write(0, 1, '身份证号')\r\n sheet.write(0, 2, '层次')\r\n sheet.write(0, 3, '专业')\r\n sheet.write(0, 4, '科目')\r\n sheet.write(0, 5, '考试批次')\r\n sheet.write(0, 6, '成绩')\r\n\r\n\r\n for index,pk in enumerate(exam_record_id_ls):\r\n exam_record = ExamRecord.objects.filter(pk=pk).first()\r\n 
sheet.write(index+1, 0, exam_record.user.username)\r\n sheet.write(index+1, 1, exam_record.user.idcard)\r\n sheet.write(index+1, 2, exam_record.user.level)\r\n sheet.write(index+1, 3, exam_record.user.major)\r\n sheet.write(index+1, 4, exam_record.exam.exam_template.subject)\r\n sheet.write(index+1, 5, exam_record.exam.exam_template.exam_th)\r\n sheet.write(index+1, 6, exam_record.grade)\r\n\r\n # write out to an in-memory buffer\r\n output = BytesIO()\r\n wb.save(output)\r\n # seek back to the start\r\n output.seek(0)\r\n response.write(output.getvalue())\r\n content['export_success_msg'] = f'导出{len(exam_record_id_ls)}个学生考试记录信息'\r\n return response\r\n else:\r\n content['export_error_msg'] = '没有选中考试记录,导出失败'\r\n request.session['change_exam_grade_content'] = content\r\n return redirect(reverse('exam_grade'))\r\nclass QueryExamGradeView(View):\r\n def get(self,request):\r\n content = {}\r\n content['all_levels'] = ('本科', '专科')\r\n content['all_subjects'] = ('英语', '语文', '生物', '化学')\r\n content['all_majors'] = ('软件工程', '法学', '数学')\r\n exam = Exam.objects.first()\r\n Exam.objects.filter(examrecord__grade__gt=0)\r\n query_form = request.session.get('query_exam_record_form', {})\r\n exam_records = ExamRecord.objects.filter(user__level=query_form.get('level',''),\r\n user__major=query_form.get('major',''),\r\n exam__exam_template__subject=query_form.get('subject',''),\r\n user__username__icontains=query_form.get('username',''),\r\n user__idcard__icontains=query_form.get('idcard',''))\r\n\r\n\r\n content.update(get_model_list_data(request, exam_records))\r\n if not exam_records: # if the queryset is empty, report the empty-result message\r\n content['query_empty_error'] = f'该查询 {query_form} :结果为空'\r\n content['last_query_form'] = query_form\r\n return render(request, 'exam_grade.html', content)\r\n def post(self,request):\r\n query = {}\r\n query['level'] = request.POST.get('level', '')\r\n query['subject'] = request.POST.get('subject', '')\r\n query['major'] = request.POST.get('major', '')\r\n query['username'] = request.POST.get('username', '')\r\n query['idcard'] = request.POST.get('idcard', '')\r\n request.session['query_exam_record_form'] = query\r\n return redirect(reverse('query_exam_grade'))\r\n\r\n# def export_excel(request):\r\n# # set the HttpResponse content type\r\n# response = HttpResponse(content_type='application/vnd.ms-excel')\r\n# response['Content-Disposition'] = 'attachment;filename=学生成绩表.xls'\r\n# # create a workbook object\r\n# wb = xlwt.Workbook(encoding='utf8')\r\n# # create a sheet object\r\n# sheet = wb.add_sheet('exam_record_sheet')\r\n#\r\n# # header-row style; optional, tweak to taste\r\n# style_heading = xlwt.easyxf(\"\"\"\r\n# font:\r\n# name Arial,\r\n# colour_index white,\r\n# bold on,\r\n# height 0xA0;\r\n# align:\r\n# wrap off,\r\n# vert center,\r\n# horiz center;\r\n# pattern:\r\n# pattern solid,\r\n# fore-colour 0x19;\r\n# borders:\r\n# left THIN,\r\n# right THIN,\r\n# top THIN,\r\n# bottom THIN;\r\n# \"\"\")\r\n#\r\n# # write the header row\r\n# sheet.write(0, 0, '申请编号', style_heading)\r\n# sheet.write(0, 1, '客户名称', style_heading)\r\n# sheet.write(0, 2, '联系方式', style_heading)\r\n# sheet.write(0, 3, '身份证号码', style_heading)\r\n# sheet.write(0, 4, '办理日期', style_heading)\r\n# sheet.write(0, 5, '处理人', style_heading)\r\n# sheet.write(0, 6, '处理状态', style_heading)\r\n# sheet.write(0, 7, '处理时间', style_heading)\r\n#\r\n# # write out to an in-memory buffer\r\n# output = BytesIO()\r\n# wb.save(output)\r\n# # seek back to the start\r\n# output.seek(0)\r\n# response.write(output.getvalue())\r\n# return response\r\n\r\ndef bad_request(request):\r\n return render(request, '400.html')\r\n\r\n\r\ndef permission_denied(request):\r\n return render(request, 
'403.html')\r\n\r\n\r\ndef page_not_found(request):\r\n return render(request, '404.html')\r\n\r\n\r\ndef error(request):\r\n # raise Http404('资源不存在,请访问 xxx 查看')\r\n return render(request, '500.html')\r\n", "sub_path": "onlineExamSystem/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 67437, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "django.views.generic.base.View", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 22, "usage_type": "name"}, {"api_name": "users.models.ExamRecord.objects.all", "line_number": 29, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 29, "usage_type": "name"}, {"api_name": "users.models.ExamRecord.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 31, "usage_type": "name"}, {"api_name": "users.models.ExamRecord.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 35, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 52, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 52, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 55, "usage_type": "name"}, {"api_name": "users.models.Exam.objects.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 60, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 78, "usage_type": "call"}, {"api_name": "users.models.Exam.objects.filter", "line_number": 83, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 83, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 89, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 92, "usage_type": "name"}, {"api_name": "users.models.Exam.objects.filter", "line_number": 97, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 97, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 98, "usage_type": "call"}, {"api_name": "users.models.Question.objects.filter", "line_number": 103, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 103, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 103, "usage_type": "name"}, {"api_name": "users.models.Question.objects.filter", "line_number": 105, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 105, "usage_type": "name"}, 
{"api_name": "users.models.Question.objects.filter", "line_number": 107, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 107, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 115, "usage_type": "call"}, {"api_name": "users.models.Exam.objects.filter", "line_number": 121, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 121, "usage_type": "name"}, {"api_name": "users.models.Question.objects.filter", "line_number": 130, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 130, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 130, "usage_type": "name"}, {"api_name": "users.models.ExamRecord.objects.create", "line_number": 144, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 144, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 144, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 149, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 150, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 152, "usage_type": "name"}, {"api_name": "users.models.Question.objects.filter", "line_number": 156, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 156, "usage_type": "name"}, {"api_name": "users.models.Knowledge.objects.all", "line_number": 164, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 164, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 164, "usage_type": "name"}, {"api_name": "users.models.Question.objects.all", "line_number": 174, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 174, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 176, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 184, "usage_type": "call"}, {"api_name": "forms.QuestionInfoForm", "line_number": 187, "usage_type": "call"}, {"api_name": "users.models.Question.objects.filter", "line_number": 193, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 193, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 193, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 194, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 194, "usage_type": "name"}, {"api_name": "users.models.Knowledge.objects.get_or_create", "line_number": 200, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 200, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 200, "usage_type": "name"}, {"api_name": "users.models.Question.objects.create", "line_number": 214, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 214, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 214, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 228, "usage_type": "call"}, {"api_name": 
"django.urls.reverse", "line_number": 228, "usage_type": "call"}, {"api_name": "forms.EditorQuestionForm", "line_number": 229, "usage_type": "call"}, {"api_name": "users.models.Question.objects.filter", "line_number": 232, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 232, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 232, "usage_type": "name"}, {"api_name": "users.models.Knowledge.objects.filter", "line_number": 242, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 242, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 242, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 250, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 250, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 252, "usage_type": "name"}, {"api_name": "users.models.Knowledge.objects.filter", "line_number": 257, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 257, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 257, "usage_type": "name"}, {"api_name": "users.models.Question.objects.filter", "line_number": 260, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 260, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 260, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 265, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 272, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 272, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 274, "usage_type": "name"}, {"api_name": "users.models.Knowledge.objects.all", "line_number": 280, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 280, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 280, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 284, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects.filter", "line_number": 291, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 291, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 291, "usage_type": "name"}, {"api_name": "users.models.Question.objects.create", "line_number": 301, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 301, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 301, "usage_type": "name"}, {"api_name": "users.models.Question.objects.create", "line_number": 306, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 306, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 306, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 311, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 311, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 313, "usage_type": "name"}, {"api_name": "users.models.Knowledge.objects.filter", "line_number": 317, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 317, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 317, "usage_type": "name"}, {"api_name": 
"users.models.Knowledge.objects.all", "line_number": 331, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 331, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 331, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 333, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects.filter", "line_number": 337, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 337, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 337, "usage_type": "name"}, {"api_name": "users.models.Knowledge.objects.create", "line_number": 343, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 343, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 343, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 346, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 346, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 348, "usage_type": "name"}, {"api_name": "users.models.Knowledge.objects.all", "line_number": 355, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 355, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 355, "usage_type": "name"}, {"api_name": "users.models.Knowledge.objects.filter", "line_number": 357, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 357, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 357, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 366, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 373, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 373, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 375, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 382, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 384, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 386, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 394, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 394, "usage_type": "call"}, {"api_name": "forms.ChangeUserInfoForm", "line_number": 416, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects.filter", "line_number": 419, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects", "line_number": 419, "usage_type": "attribute"}, {"api_name": "users.models.UserProfile", "line_number": 419, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 427, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 427, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 430, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 457, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 462, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 464, "usage_type": "name"}, {"api_name": "users.models.UserProfile.objects.filter", "line_number": 468, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects", "line_number": 468, "usage_type": "attribute"}, {"api_name": "users.models.UserProfile", 
"line_number": 468, "usage_type": "name"}, {"api_name": "users.models", "line_number": 475, "usage_type": "name"}, {"api_name": "users.models.UserProfile.objects.all", "line_number": 475, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects", "line_number": 475, "usage_type": "attribute"}, {"api_name": "users.models.UserProfile", "line_number": 475, "usage_type": "name"}, {"api_name": "users.models", "line_number": 476, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 481, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects.filter", "line_number": 485, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects", "line_number": 485, "usage_type": "attribute"}, {"api_name": "users.models.UserProfile", "line_number": 485, "usage_type": "name"}, {"api_name": "forms.EditorUserForm", "line_number": 495, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects.filter", "line_number": 498, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects", "line_number": 498, "usage_type": "attribute"}, {"api_name": "users.models.UserProfile", "line_number": 498, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 510, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 510, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 511, "usage_type": "name"}, {"api_name": "users.models", "line_number": 517, "usage_type": "name"}, {"api_name": "users.models.UserProfile.objects.filter", "line_number": 517, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects", "line_number": 517, "usage_type": "attribute"}, {"api_name": "users.models.UserProfile", "line_number": 517, "usage_type": "name"}, {"api_name": "users.models", "line_number": 519, "usage_type": "name"}, {"api_name": "users.models.filter", "line_number": 519, "usage_type": "call"}, {"api_name": "users.models", "line_number": 521, "usage_type": "name"}, {"api_name": "users.models.filter", "line_number": 521, "usage_type": "call"}, {"api_name": "users.models", "line_number": 522, "usage_type": "argument"}, {"api_name": "users.models.count", "line_number": 523, "usage_type": "call"}, {"api_name": "users.models", "line_number": 523, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 526, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 534, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 534, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 536, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 538, "usage_type": "call"}, {"api_name": "forms.AddUserForm", "line_number": 555, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects.create_user", "line_number": 557, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects", "line_number": 557, "usage_type": "attribute"}, {"api_name": "users.models.UserProfile", "line_number": 557, "usage_type": "name"}, {"api_name": "forms.AddUserForm", "line_number": 568, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects.create_user", "line_number": 578, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects", "line_number": 578, "usage_type": "attribute"}, {"api_name": "users.models.UserProfile", "line_number": 578, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 586, "usage_type": "call"}, 
{"api_name": "django.views.generic.base.View", "line_number": 588, "usage_type": "name"}, {"api_name": "users.models.Plan.objects.all", "line_number": 590, "usage_type": "call"}, {"api_name": "users.models.Plan.objects", "line_number": 590, "usage_type": "attribute"}, {"api_name": "users.models.Plan", "line_number": 590, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 598, "usage_type": "call"}, {"api_name": "forms.AddPlanForm", "line_number": 601, "usage_type": "call"}, {"api_name": "users.models.Plan.objects.create", "line_number": 610, "usage_type": "call"}, {"api_name": "users.models.Plan.objects", "line_number": 610, "usage_type": "attribute"}, {"api_name": "users.models.Plan", "line_number": 610, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 618, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 618, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 619, "usage_type": "name"}, {"api_name": "users.models.Plan.objects.filter", "line_number": 625, "usage_type": "call"}, {"api_name": "users.models.Plan.objects", "line_number": 625, "usage_type": "attribute"}, {"api_name": "users.models.Plan", "line_number": 625, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 630, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 635, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 635, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 636, "usage_type": "name"}, {"api_name": "users.models.Plan.objects.filter", "line_number": 639, "usage_type": "call"}, {"api_name": "users.models.Plan.objects", "line_number": 639, "usage_type": "attribute"}, {"api_name": "users.models.Plan", "line_number": 639, "usage_type": "name"}, {"api_name": "forms.EditorPlanForm", "line_number": 647, "usage_type": "call"}, {"api_name": "users.models.Plan.objects.filter", "line_number": 652, "usage_type": "call"}, {"api_name": "users.models.Plan.objects", "line_number": 652, "usage_type": "attribute"}, {"api_name": "users.models.Plan", "line_number": 652, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 676, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 676, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 679, "usage_type": "name"}, {"api_name": "users.models.Plan.objects.all", "line_number": 682, "usage_type": "call"}, {"api_name": "users.models.Plan.objects", "line_number": 682, "usage_type": "attribute"}, {"api_name": "users.models.Plan", "line_number": 682, "usage_type": "name"}, {"api_name": "users.models.ExamTemplate.objects.all", "line_number": 685, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 685, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 685, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 694, "usage_type": "call"}, {"api_name": "forms.AddExamTemplateForm", "line_number": 697, "usage_type": "call"}, {"api_name": "users.models.Plan.objects.filter", "line_number": 704, "usage_type": "call"}, {"api_name": "users.models.Plan.objects", "line_number": 704, "usage_type": "attribute"}, {"api_name": "users.models.Plan", "line_number": 704, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 708, "usage_type": "call"}, {"api_name": "datetime.datetime", 
"line_number": 708, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 709, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 709, "usage_type": "name"}, {"api_name": "users.models.ExamTemplate.objects.create", "line_number": 711, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 711, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 711, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 720, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 720, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 721, "usage_type": "name"}, {"api_name": "users.models.ExamTemplate.objects.filter", "line_number": 724, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 724, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 724, "usage_type": "name"}, {"api_name": "forms.EditorExamTemplateForm", "line_number": 732, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects.filter", "line_number": 738, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 738, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 738, "usage_type": "name"}, {"api_name": "users.models.Plan.objects.filter", "line_number": 745, "usage_type": "call"}, {"api_name": "users.models.Plan.objects", "line_number": 745, "usage_type": "attribute"}, {"api_name": "users.models.Plan", "line_number": 745, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 749, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 749, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 750, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 750, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 756, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 756, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 757, "usage_type": "name"}, {"api_name": "users.models.Plan.objects.all", "line_number": 760, "usage_type": "call"}, {"api_name": "users.models.Plan.objects", "line_number": 760, "usage_type": "attribute"}, {"api_name": "users.models.Plan", "line_number": 760, "usage_type": "name"}, {"api_name": "users.models.ExamTemplate.objects.all", "line_number": 763, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 763, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 763, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 767, "usage_type": "call"}, {"api_name": "users.models.Plan", "line_number": 767, "usage_type": "argument"}, {"api_name": "users.models.ExamTemplate.objects.filter", "line_number": 768, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 768, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 768, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 779, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 789, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 789, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 791, "usage_type": "name"}, 
{"api_name": "users.models.ExamTemplate.objects.all", "line_number": 797, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 797, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 797, "usage_type": "name"}, {"api_name": "users.models.Exam.objects.all", "line_number": 798, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 798, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 798, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 803, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects.filter", "line_number": 814, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 814, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 814, "usage_type": "name"}, {"api_name": "users.models.Knowledge.objects.filter", "line_number": 821, "usage_type": "call"}, {"api_name": "users.models.Knowledge.objects", "line_number": 821, "usage_type": "attribute"}, {"api_name": "users.models.Knowledge", "line_number": 821, "usage_type": "name"}, {"api_name": "users.models.Question.objects.none", "line_number": 822, "usage_type": "call"}, {"api_name": "users.models.Question.objects", "line_number": 822, "usage_type": "attribute"}, {"api_name": "users.models.Question", "line_number": 822, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 835, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 838, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 841, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 845, "usage_type": "call"}, {"api_name": "users.models.Exam", "line_number": 846, "usage_type": "call"}, {"api_name": "users.models.Exam.objects.bulk_create", "line_number": 849, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 849, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 849, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 856, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 856, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 857, "usage_type": "name"}, {"api_name": "users.models.ExamTemplate.objects.all", "line_number": 863, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 863, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 863, "usage_type": "name"}, {"api_name": "users.models.Exam.objects.all", "line_number": 864, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 864, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 864, "usage_type": "name"}, {"api_name": "users.models.ExamTemplate.objects.filter", "line_number": 866, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 866, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 866, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 879, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 887, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 887, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 888, "usage_type": "name"}, {"api_name": "users.models.Exam.objects.filter", "line_number": 
891, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 891, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 891, "usage_type": "name"}, {"api_name": "forms.EditorExamTemplateForm", "line_number": 900, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects.filter", "line_number": 906, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 906, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 906, "usage_type": "name"}, {"api_name": "users.models.Plan.objects.filter", "line_number": 913, "usage_type": "call"}, {"api_name": "users.models.Plan.objects", "line_number": 913, "usage_type": "attribute"}, {"api_name": "users.models.Plan", "line_number": 913, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 917, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 917, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 918, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 918, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 926, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 926, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 929, "usage_type": "name"}, {"api_name": "users.models.ExamTemplate.objects.all", "line_number": 935, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 935, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 935, "usage_type": "name"}, {"api_name": "users.models.Exam.objects.all", "line_number": 936, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 936, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 936, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 938, "usage_type": "call"}, {"api_name": "users.models.Exam.objects.filter", "line_number": 947, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 947, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 947, "usage_type": "name"}, {"api_name": "users.models", "line_number": 948, "usage_type": "name"}, {"api_name": "users.models.UserProfile.objects.filter", "line_number": 948, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects", "line_number": 948, "usage_type": "attribute"}, {"api_name": "users.models.UserProfile", "line_number": 948, "usage_type": "name"}, {"api_name": "users.models", "line_number": 951, "usage_type": "name"}, {"api_name": "users.models", "line_number": 960, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 961, "usage_type": "call"}, {"api_name": "users.models.Exam.objects.filter", "line_number": 966, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 966, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 966, "usage_type": "name"}, {"api_name": "users.models.UserProfile.objects.filter", "line_number": 969, "usage_type": "call"}, {"api_name": "users.models.UserProfile.objects", "line_number": 969, "usage_type": "attribute"}, {"api_name": "users.models.UserProfile", "line_number": 969, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 973, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 973, "usage_type": 
"call"}, {"api_name": "django.views.generic.base.View", "line_number": 975, "usage_type": "name"}, {"api_name": "users.models.ExamTemplate.objects.all", "line_number": 981, "usage_type": "call"}, {"api_name": "users.models.ExamTemplate.objects", "line_number": 981, "usage_type": "attribute"}, {"api_name": "users.models.ExamTemplate", "line_number": 981, "usage_type": "name"}, {"api_name": "users.models.Exam.objects.filter", "line_number": 983, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 983, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 983, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 996, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1004, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 1004, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 1007, "usage_type": "name"}, {"api_name": "users.models.ExamRecord.objects.filter", "line_number": 1011, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 1011, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 1011, "usage_type": "name"}, {"api_name": "users.models.ExamRecord.objects.all", "line_number": 1022, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 1022, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 1022, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 1027, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects.filter", "line_number": 1030, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 1030, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 1030, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 1037, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 1037, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 1039, "usage_type": "name"}, {"api_name": "users.models.ExamRecord.objects.filter", "line_number": 1047, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 1047, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 1047, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 1058, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1067, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 1067, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 1070, "usage_type": "name"}, {"api_name": "users.models.ExamRecord.objects.filter", "line_number": 1074, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 1074, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 1074, "usage_type": "name"}, {"api_name": "users.models.ExamRecord.objects.all", "line_number": 1085, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 1085, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 1085, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 1090, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects.filter", "line_number": 1099, "usage_type": "call"}, {"api_name": 
"users.models.ExamRecord.objects", "line_number": 1099, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 1099, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 1115, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects.filter", "line_number": 1118, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 1118, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 1118, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 1124, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 1124, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 1126, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 1131, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 1134, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects.filter", "line_number": 1148, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 1148, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 1148, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 1158, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1168, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 1168, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 1169, "usage_type": "name"}, {"api_name": "users.models.Exam.objects.first", "line_number": 1175, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 1175, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 1175, "usage_type": "name"}, {"api_name": "users.models.Exam.objects.filter", "line_number": 1176, "usage_type": "call"}, {"api_name": "users.models.Exam.objects", "line_number": 1176, "usage_type": "attribute"}, {"api_name": "users.models.Exam", "line_number": 1176, "usage_type": "name"}, {"api_name": "users.models.ExamRecord.objects.filter", "line_number": 1178, "usage_type": "call"}, {"api_name": "users.models.ExamRecord.objects", "line_number": 1178, "usage_type": "attribute"}, {"api_name": "users.models.ExamRecord", "line_number": 1178, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 1189, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1198, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 1198, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 1249, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 1253, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 1257, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 1262, "usage_type": "call"}]} +{"seq_id": "531877610", "text": "import sys\r\nimport os\r\nimport csv\r\nimport math\r\nimport json\r\nimport random\r\nfrom datetime import datetime\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport cv2\r\n\r\nfrom scipy.spatial.distance import euclidean\r\nfrom fastdtw import fastdtw\r\nfrom collections import OrderedDict\r\n\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.preprocessing import PowerTransformer\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.decomposition import FastICA\r\nfrom 
sklearn.manifold import MDS\r\nfrom sklearn.manifold import TSNE\r\nfrom sklearn.cross_decomposition import PLSRegression\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom scipy.stats import entropy\r\n\r\nfrom flask import *\r\nfrom flask_cors import CORS\r\n\r\nimport pythons.models.IttiKoch1998.pySaliencyMap as pySaliencyMap\r\n# import pythons.models.IttiKoch1998.pySaliencyMapDefs as pySaliencyMapDefs\r\n\r\n# init dataset name, feature types, and stimulus type\r\nSTI_DATASET = \"\"\r\nSTI_CLASS = []\r\nPARTICIPANT = []\r\nFEATURE = []\r\nFEATURE_ordered = [\"intensity\", \"color\", \"orientation\", \"curvature\", \"center_bias\", \"entropy_rate\", \"log_spectrum\", \"HOG\"]\r\nCOLORS = [\"#e41a1c\", \"#377eb8\", \"#4daf4a\", \"#984ea3\", \"#ff7f00\", \"#ffff33\", \"#a65628\", \"#f781bf\", \"#999999\"]\r\nRAW_DATA_LIST = []\r\nPATCH_SIZE = 20\r\nTHRESHOLD_SM = 50\r\n\r\napp = Flask(__name__)\r\nCORS(app) # apply CORS before the server starts, not after app.run() returns\r\nif __name__ == '__main__':\r\n app.jinja_env.auto_reload = True\r\n app.config['TEMPLATES_AUTO_RELOAD'] = True\r\n # NOTE: app.run() here executes before the @app.route handlers below are\r\n # registered; in practice this block belongs at the bottom of the module.\r\n app.run(debug=True)\r\n\r\n############################\r\n# saliency model functions #\r\n############################\r\ndef gen_saliency_map(model_name, stimulus, outPath):\r\n if model_name == \"IttiKoch1998\":\r\n saliency_IttiKoch1998(stimulus, outPath)\r\n else:\r\n print(\"ERROR: unavailable saliency model selected\")\r\n\r\ndef saliency_IttiKoch1998(stimulus, outPath):\r\n stiHeight, stiWidth = stimulus.shape[:2]\r\n sm = pySaliencyMap.pySaliencyMap(stiWidth, stiHeight)\r\n saliencyMap = sm.SMGetSM(stimulus)\r\n cvtedImg = cv2.convertScaleAbs(saliencyMap, alpha=255.0)\r\n cv2.imwrite(outPath, cvtedImg)\r\n\r\ndef gen_difference_map(gt, sm, outPath):\r\n diff = gt - sm\r\n cv2.imwrite(\"./static/__cache__/gt-sm.png\", diff)\r\n diff2 = sm - gt\r\n cv2.imwrite(\"./static/__cache__/sm-gt.png\", diff2)\r\n dm = sm.copy()\r\n BLACK_COLOR = np.array([0, 0, 0])\r\n thr = 10\r\n if len(gt) == len(sm) and len(gt[0]) == len(sm[0]):\r\n for i in range(0, len(gt)):\r\n for j in range(0, len(gt[i])):\r\n replaceArr = np.array([0, 0, 0])\r\n if dm[i][j][0] <= thr or dm[i][j][1] <= thr or dm[i][j][2] <= thr:\r\n if dm[i][j][0] == dm[i][j][1] and dm[i][j][0] == dm[i][j][2]:\r\n dm[i][j] = np.array([0, 0, 0])\r\n if np.array_equal(gt[i][j], BLACK_COLOR) and np.array_equal(dm[i][j], BLACK_COLOR):\r\n replaceArr = np.array([0, 0, 0])\r\n elif not(np.array_equal(gt[i][j], BLACK_COLOR)) and not(np.array_equal(dm[i][j], BLACK_COLOR)):\r\n # Red color\r\n # replaceArr = np.array([228, 26, 28])\r\n # (note: cv2 arrays are BGR, so [255, 0, 0] actually renders as blue)\r\n replaceArr = np.array([255, 0, 0])\r\n elif not(np.array_equal(gt[i][j], BLACK_COLOR)) and np.array_equal(dm[i][j], BLACK_COLOR):\r\n # Blue color\r\n replaceArr = np.array([0, 0, 255])\r\n elif np.array_equal(gt[i][j], BLACK_COLOR) and not(np.array_equal(dm[i][j], BLACK_COLOR)):\r\n # Green color\r\n replaceArr = np.array([0, 255, 0])\r\n else:\r\n replaceArr = np.array([255, 255, 255])\r\n dm[i][j] = replaceArr\r\n else:\r\n print(\"ERROR: different shape\")\r\n cv2.imwrite(outPath, dm)\r\n\r\ndef gen_discrete_saliency_map(sm, threshold):\r\n dis_sm = sm.copy()\r\n for i in range(0, len(dis_sm)):\r\n for j in range(0, len(dis_sm[i])):\r\n if dis_sm[i][j][0] < threshold:\r\n dis_sm[i][j] = np.array([0, 0, 0])\r\n else:\r\n dis_sm[i][j] = np.array([255, 255, 255])\r\n cv2.imwrite(\"./static/__cache__/discrete_saliency_map.png\", dis_sm)\r\n\r\n#########################################\r\n# saliency evaluation metric functions #\r\n#########################################\r\n
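\r\n# [editor's sketch] The per-pixel loops above are clear but slow on large stimuli; here is a minimal vectorized equivalent of gen_discrete_saliency_map, assuming sm is a BGR uint8 image as produced by cv2.imread. Illustrative only; nothing in this app calls it.\r\ndef gen_discrete_saliency_map_vectorized(sm, threshold, outPath=\"./static/__cache__/discrete_saliency_map.png\"):\r\n dis_sm = np.zeros_like(sm)\r\n dis_sm[sm[:, :, 0] >= threshold] = 255 # same rule as the loop: threshold the first channel, write black/white\r\n cv2.imwrite(outPath, dis_sm)\r\n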
def calculate_evaluation_metric(em, gt, sm):\r\n s_map_norm = normalize_map(sm)\r\n if em == \"IG\":\r\n return IG(s_map_norm, gt, gt)\r\n elif em == \"AUC\":\r\n return AUC(s_map_norm, gt)\r\n elif em == \"sAUC\":\r\n return SAUC(s_map_norm, gt, gt)\r\n elif em == \"NSS\":\r\n return NSS(sm, gt)\r\n elif em == \"CC\":\r\n return CC(sm, gt)\r\n elif em == \"KLDiv\":\r\n return KLdiv(sm, gt)\r\n elif em == \"SIM\":\r\n return SIM(s_map_norm, gt)\r\n else:\r\n print(\"ERROR: unavailable evaluation metric selected\")\r\n return -999\r\n\r\ndef NSS(saliency_map, ground_truth_map):\r\n \"\"\"\r\n Normalized scanpath saliency between two different saliency maps:\r\n the mean value of the normalized saliency map at fixation locations.\r\n Compute NSS score.\r\n :param saliency_map : predicted saliency map\r\n :param ground_truth_map : ground truth saliency map.\r\n :return score: float : score\r\n \"\"\"\r\n if not isinstance(saliency_map, np.ndarray):\r\n saliency_map = np.array(saliency_map)\r\n\r\n if not isinstance(ground_truth_map, np.ndarray):\r\n ground_truth_map = np.array(ground_truth_map)\r\n\r\n if saliency_map.size != ground_truth_map.size:\r\n saliency_map = cv2.resize(saliency_map, dsize=(ground_truth_map.shape[1], ground_truth_map.shape[0]))\r\n # saliency_map = imresize(saliency_map, fixation_map.shape)\r\n\r\n MAP = (saliency_map - saliency_map.mean()) / (saliency_map.std())\r\n mask = ground_truth_map.astype(bool) # np.bool is deprecated; the builtin bool behaves the same here\r\n score = MAP[mask].mean()\r\n return score\r\n\r\ndef CC(saliency_map, ground_truth_map):\r\n \"\"\"\r\n This finds the linear correlation coefficient between two different\r\n saliency maps (also called Pearson's linear coefficient).\r\n score=1 or -1 means the maps are correlated\r\n score=0 means the maps are completely uncorrelated\r\n saliencyMap1 and saliencyMap2 are 2 real-valued matrices\r\n Compute CC score.\r\n :param saliency_map : first saliency map\r\n :param saliency_map_gt : second saliency map.\r\n :return score: float : score\r\n \"\"\"\r\n if not isinstance(saliency_map, np.ndarray):\r\n saliency_map = np.array(saliency_map, dtype=np.float32)\r\n elif saliency_map.dtype != np.float32:\r\n saliency_map = saliency_map.astype(np.float32)\r\n\r\n if not isinstance(ground_truth_map, np.ndarray):\r\n ground_truth_map = np.array(ground_truth_map, dtype=np.float32)\r\n elif ground_truth_map.dtype != np.float32: # was saliency_map.dtype, which silently skipped this cast\r\n ground_truth_map = ground_truth_map.astype(np.float32)\r\n\r\n if saliency_map.size != ground_truth_map.size:\r\n saliency_map = cv2.resize(saliency_map, dsize=(ground_truth_map.shape[1], ground_truth_map.shape[0]))\r\n # saliency_map = imresize(saliency_map, ground_truth_map.shape)\r\n\r\n saliency_map = (saliency_map - saliency_map.mean()) / (saliency_map.std())\r\n ground_truth_map = (ground_truth_map - ground_truth_map.mean()) / (ground_truth_map.std())\r\n score = np.corrcoef(saliency_map.flatten(), ground_truth_map.flatten())[0][1]\r\n return score\r\n\r\n# def KLdiv(saliency_map, ground_truth_map):\r\n# \"\"\"\r\n# This finds the KL-divergence between two different saliency maps when\r\n# viewed as distributions: it is a non-symmetric measure of the information\r\n# lost when saliencyMap is used to estimate fixationMap.\r\n# Compute KL-divergence.\r\n# :param saliency_map : predicted saliency map\r\n# :param fixation_map : ground truth saliency map.\r\n# :return score: float : score\r\n# \"\"\"\r\n# if saliency_map.size != ground_truth_map.size:\r\n# saliency_map = 
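\r\n# [editor's sketch] A hand-runnable sanity check for NSS and CC above, on synthetic float maps; illustrative only and not wired into any route. A smooth prediction centered on the fixated blob should give NSS > 0, and a map against itself gives CC of about 1.0.\r\ndef _check_nss_cc():\r\n _gt = np.zeros((480, 640), dtype=np.float32)\r\n _gt[200:280, 280:360] = 1.0 # one fixated blob\r\n _sm = cv2.GaussianBlur(_gt, (99, 99), 0) # a smooth \"prediction\" around the blob\r\n print(\"NSS:\", NSS(_sm, _gt)) # expected: clearly positive\r\n print(\"CC :\", CC(_sm, _sm)) # expected: ~1.0\r\n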
cv2.resize(saliency_map, dsize=(ground_truth_map.shape[1], ground_truth_map.shape[0]))\r\n\r\n# if not isinstance(saliency_map, np.ndarray):\r\n# saliency_map = np.array(saliency_map, dtype=np.float32)\r\n# elif saliency_map.dtype != np.float32:\r\n# saliency_map = saliency_map.astype(np.float32)\r\n\r\n# if not isinstance(ground_truth_map, np.ndarray):\r\n# ground_truth_map = np.array(ground_truth_map, dtype=np.float32)\r\n# elif ground_truth_map.dtype != np.float32:\r\n# ground_truth_map = ground_truth_map.astype(np.float32)\r\n\r\n# EPS = np.finfo(np.float32).eps\r\n# # the function will normalize maps before computing Kld\r\n# score = entropy(saliency_map.flatten() + EPS, ground_truth_map.flatten() + EPS)\r\n# return score\r\n\r\ndef KLdiv(s_map,gt):\r\n s_map = s_map/(np.sum(s_map)*1.0)\r\n gt = gt/(np.sum(gt)*1.0)\r\n eps = 2.2204e-16\r\n return np.sum(gt * np.log(eps + gt/(s_map + eps)))\r\n\r\ndef AUC(saliency_map, ground_truth_map):\r\n \"\"\"Computes AUC for given saliency map 'saliency_map' and given\r\n fixation map 'fixation_map'\r\n \"\"\"\r\n def area_under_curve(predicted, actual, labelset):\r\n def roc_curve(predicted, actual, cls):\r\n si = np.argsort(-predicted)\r\n tp = np.cumsum(np.single(actual[si]==cls))\r\n fp = np.cumsum(np.single(actual[si]!=cls))\r\n tp = tp/np.sum(actual==cls)\r\n fp = fp/np.sum(actual!=cls)\r\n tp = np.hstack((0.0, tp, 1.0))\r\n fp = np.hstack((0.0, fp, 1.0))\r\n return tp, fp\r\n def auc_from_roc(tp, fp):\r\n h = np.diff(fp)\r\n auc = np.sum(h*(tp[1:]+tp[:-1]))/2.0\r\n return auc\r\n\r\n tp, fp = roc_curve(predicted, actual, np.max(labelset))\r\n auc = auc_from_roc(tp, fp)\r\n return auc\r\n\r\n ground_truth_map = (ground_truth_map>0.7).astype(int)\r\n salShape = saliency_map.shape\r\n fixShape = ground_truth_map.shape\r\n\r\n predicted = saliency_map.reshape(salShape[0]*salShape[1], -1, order='F').flatten()\r\n actual = ground_truth_map.reshape(fixShape[0]*fixShape[1], -1, order='F').flatten()\r\n labelset = np.arange(2)\r\n return area_under_curve(predicted, actual, labelset)\r\n\r\ndef SAUC(saliency_map, ground_truth_map, shuf_map=np.zeros((480,640)), step_size=.01):\r\n # shuf_map=np.zeros(ground_truth_map.shape)\r\n # shuf_map = ground_truth_map\r\n \"\"\"\r\n please cite: https://github.com/NUS-VIP/salicon-evaluation\r\n calculates shuffled-AUC score.\r\n :param salinecy_map : predicted saliency map\r\n :param fixation_map : ground truth saliency map.\r\n :return score: int : score\r\n \"\"\"\r\n saliency_map -= np.min(saliency_map)\r\n ground_truth_map = np.vstack(np.where(ground_truth_map!=0)).T\r\n \r\n if np.max(saliency_map) > 0:\r\n saliency_map = saliency_map / np.max(saliency_map)\r\n Sth = np.asarray([ saliency_map[y-1][x-1] for y,x in ground_truth_map ])\r\n \r\n Nfixations = len(ground_truth_map)\r\n others = np.copy(shuf_map)\r\n for y,x in ground_truth_map:\r\n others[y-1][x-1] = 0\r\n\r\n ind = np.nonzero(others) # find fixation locations on other images\r\n nFix = shuf_map[ind]\r\n randfix = saliency_map[ind]\r\n Nothers = sum(nFix)\r\n\r\n allthreshes = np.arange(0,np.max(np.concatenate((Sth, randfix), axis=0)),step_size)\r\n allthreshes = allthreshes[::-1]\r\n tp = np.zeros(len(allthreshes)+2)\r\n fp = np.zeros(len(allthreshes)+2)\r\n tp[-1]=1.0\r\n fp[-1]=1.0\r\n tp[1:-1]=[float(np.sum(Sth >= thresh))/Nfixations for thresh in allthreshes]\r\n fp[1:-1]=[float(np.sum(nFix[randfix >= thresh]))/Nothers for thresh in allthreshes]\r\n score = np.trapz(tp,fp)\r\n return score\r\n\r\ndef IG(saliency_map, ground_truth_map, 
baseline_map=np.zeros((480,640))):\r\n \"\"\"\r\n please cite:\r\n Calculates the information gain score.\r\n :param saliency_map : predicted saliency map\r\n :param ground_truth_map : ground truth saliency map.\r\n :param baseline_map : a baseline fixation map\r\n :return score: float : score\r\n \"\"\"\r\n if saliency_map.size != ground_truth_map.size:\r\n saliency_map = cv2.resize(saliency_map, dsize=(ground_truth_map.shape[1], ground_truth_map.shape[0]))\r\n\r\n if not isinstance(saliency_map, np.ndarray):\r\n saliency_map = np.array(saliency_map, dtype=np.float32)\r\n elif saliency_map.dtype != np.float32:\r\n saliency_map = saliency_map.astype(np.float32)\r\n\r\n if not isinstance(ground_truth_map, np.ndarray):\r\n ground_truth_map = np.array(ground_truth_map, dtype=np.float32)\r\n elif ground_truth_map.dtype != np.float32:\r\n ground_truth_map = ground_truth_map.astype(np.float32)\r\n\r\n if not isinstance(baseline_map, np.ndarray):\r\n baseline_map = np.array(baseline_map, dtype=np.float32)\r\n elif baseline_map.dtype != np.float32: # was ground_truth_map.dtype, which silently skipped this cast\r\n baseline_map = baseline_map.astype(np.float32)\r\n\r\n saliency_map = (saliency_map - saliency_map.min()) / (saliency_map.max() - saliency_map.min())\r\n saliency_map = saliency_map / saliency_map.sum()\r\n baseline_map = (baseline_map - baseline_map.min()) / (baseline_map.max() - baseline_map.min())\r\n baseline_map = baseline_map / baseline_map.sum()\r\n fixs = ground_truth_map.astype(bool) # np.bool is deprecated; the builtin bool behaves the same here\r\n EPS = np.finfo(np.float32).eps\r\n return (np.log2(EPS + saliency_map[fixs]) - np.log2(EPS + baseline_map[fixs])).mean()\r\n\r\ndef SIM(saliency_map, ground_truth_map):\r\n \"\"\"\r\n Compute similarity score.\r\n :param saliency_map : predicted saliency map\r\n :param ground_truth_map : ground truth saliency map.\r\n :return score: float : score\r\n \"\"\"\r\n if saliency_map.size != ground_truth_map.size:\r\n saliency_map = cv2.resize(saliency_map, dsize=(ground_truth_map.shape[1], ground_truth_map.shape[0]))\r\n \r\n if not isinstance(saliency_map, np.ndarray):\r\n saliency_map = np.array(saliency_map, dtype=np.float32)\r\n elif saliency_map.dtype != np.float32:\r\n saliency_map = saliency_map.astype(np.float32)\r\n\r\n if not isinstance(ground_truth_map, np.ndarray):\r\n ground_truth_map = np.array(ground_truth_map, dtype=np.float32)\r\n elif ground_truth_map.dtype != np.float32:\r\n ground_truth_map = ground_truth_map.astype(np.float32)\r\n\r\n saliency_map = (saliency_map - saliency_map.min()) / (saliency_map.max() - saliency_map.min())\r\n saliency_map = saliency_map / saliency_map.sum()\r\n ground_truth_map = (ground_truth_map - ground_truth_map.min()) / (ground_truth_map.max() - ground_truth_map.min())\r\n ground_truth_map = ground_truth_map / ground_truth_map.sum()\r\n return np.minimum(saliency_map, ground_truth_map).sum()\r\n\r\ndef normalize_map(s_map):\r\n # normalize the salience map (as done in MIT code)\r\n norm_s_map = (s_map - np.min(s_map))/((np.max(s_map)-np.min(s_map))*1.0)\r\n return norm_s_map\r\n \r\n\r\n######################\r\n# overview functions #\r\n######################\r\ndef overview_count(stimulusNames, datasetName, semanticClass):\r\n fixDirPath = \"./static/fix/\"+ datasetName +\"/\"+ semanticClass +\"/\"\r\n countList = []\r\n for stiFileFullName in stimulusNames:\r\n stiName = stiFileFullName.split(\".\")[0]\r\n stiExe = stiFileFullName.split(\".\")[1]\r\n humanFixationMapPath = \"./static/ground_truth/\"+ datasetName +\"/\"+ semanticClass +\"/\"+ stiName +\".jpg\"\r\n \r\n humanFixationMap = 
cv2.imread(humanFixationMapPath)\r\n fixFileDirPath = fixDirPath + stiName +\"_\"+ stiExe +\"/\"\r\n fixFileList = os.listdir(fixFileDirPath)\r\n patchList = []\r\n patchList_on = []\r\n patchList_out = []\r\n for fixFileName in fixFileList:\r\n path = fixFileDirPath + fixFileName\r\n pDF = pd.read_csv(path, header=None)\r\n pList = pDF.values.tolist()\r\n for _p in pList:\r\n patchList.append([_p[0], _p[1]])\r\n labelVal = label_groundTruthFixationMap(humanFixationMap, int(_p[0]), int(_p[1]))\r\n if labelVal == 0:\r\n patchList_out.append([_p[0], _p[1]])\r\n else:\r\n patchList_on.append([_p[0], _p[1]])\r\n countList.append([stiFileFullName, len(fixFileList), len(patchList), len(patchList_on), len(patchList_out)])\r\n return countList\r\n\r\ndef overview_count_sm(stimulusNames, datasetName, semanticClass, sm_model):\r\n fixDirPath = \"./static/fix/\"+ datasetName +\"/\"+ semanticClass +\"/\"\r\n countList = []\r\n for stiFileFullName in stimulusNames:\r\n stiName = stiFileFullName.split(\".\")[0]\r\n stiExe = stiFileFullName.split(\".\")[1]\r\n smPath = \"./static/models/\" + sm_model +\"/\"+ datasetName +\"-\"+ semanticClass +\"-\"+ stiName +\".jpg\"\r\n stiPath = \"./static/stimulus/\" + datasetName +\"/\"+ semanticClass +\"/\"+ stiFileFullName\r\n stimulus = cv2.imread(stiPath)\r\n if not(os.path.exists(smPath)):\r\n gen_saliency_map(sm_model, stimulus, smPath)\r\n saliencyMap = cv2.imread(smPath)\r\n \r\n fixFileDirPath = fixDirPath + stiName +\"_\"+ stiExe +\"/\"\r\n fixFileList = os.listdir(fixFileDirPath)\r\n patchList = []\r\n patchList_on = []\r\n patchList_out = []\r\n for fixFileName in fixFileList:\r\n path = fixFileDirPath + fixFileName\r\n pDF = pd.read_csv(path, header=None)\r\n pList = pDF.values.tolist()\r\n for _p in pList:\r\n patchList.append([_p[0], _p[1]])\r\n labelVal = label_saliencyMap(saliencyMap, int(_p[0]), int(_p[1]), THRESHOLD_SM)\r\n if labelVal == 0:\r\n patchList_out.append([_p[0], _p[1]])\r\n else:\r\n patchList_on.append([_p[0], _p[1]])\r\n countList.append([stiFileFullName, len(fixFileList), len(patchList), len(patchList_on), len(patchList_out)])\r\n return countList\r\n\r\n######################\r\n# general funcations #\r\n######################\r\ndef makeJSON(_path, _data):\r\n wf = open(_path, \"w\", newline='', encoding='utf-8')\r\n wf.write(json.dumps(_data))\r\n wf.close()\r\n\r\n################################\r\n# clustering related functions #\r\n################################\r\ndef label_groundTruthFixationMap(_gt, _x, _y):\r\n if np.array_equal(_gt[_y][_x], np.array([0, 0, 0])):\r\n return 0\r\n else:\r\n return 1\r\n\r\ndef label_saliencyMap(_sm, _x, _y, threshold):\r\n if _sm[_y][_x][0] < threshold:\r\n return 0\r\n else:\r\n return 1\r\n\r\ndef generate_discrete_groundTruthFixationMap(_gt):\r\n _dgtfmPath = \"./static/__cache__/discrete_ground_truth_fixation_map.png\"\r\n gtCopy = _gt.copy()\r\n for i in range(0, len(gtCopy)):\r\n for j in range(0, len(gtCopy[i])):\r\n replaceArr = np.array([255, 255, 255])\r\n if np.array_equal(gtCopy[i][j], np.array([0, 0, 0])):\r\n replaceArr = np.array([0, 0, 0])\r\n gtCopy[i][j] = replaceArr\r\n cv2.imwrite(_dgtfmPath, gtCopy)\r\n\r\ndef getFeatureMeanVal(_featDF, _x, _y, _stiWidth, _stiHeight, _patchSize):\r\n meanVal = 0\r\n min_x = int(_x - _patchSize/2)\r\n max_x = int(_x + _patchSize/2)\r\n if min_x < 0:\r\n min_x = 0\r\n if max_x > _stiWidth-1:\r\n max_x = int(_stiWidth-1)\r\n min_y = int(_y - _patchSize/2)\r\n max_y = int(_y + _patchSize/2)\r\n if min_y < 0:\r\n min_y = 0\r\n if max_y > 
_stiHeight-1:\r\n max_y = int(_stiHeight-1)\r\n featNP = _featDF.to_numpy()\r\n # print(\"top: %d, bottom: %d, left: %d, right: %d\"%(min_y, max_y, min_x, max_x))\r\n patch = featNP[min_y:max_y, min_x:max_x]\r\n # print(patch.shape)\r\n meanVal = patch.mean()\r\n return meanVal\r\n\r\ndef featureIndividualNormalizations(nMethods, df, featureList, frontCols):\r\n # print(\"Feature Normalization methods: \"+nMethods)\r\n tdf = df[frontCols]\r\n dfIndex = len(frontCols)\r\n for i in range(0, len(nMethods)):\r\n cCount = dfIndex + i\r\n nm = nMethods[i]\r\n featureName = featureList[i]\r\n print(\"Normalization: \"+featureName+\"::\"+nm)\r\n cDF = df[featureName]\r\n if nm == \"min_max\":\r\n _ndf = nm_minMax(cDF)\r\n tdf.insert(cCount, featureName, _ndf, True)\r\n elif nm == \"z_score\":\r\n _ndf = nm_zScore(cDF)\r\n tdf.insert(cCount, featureName, _ndf, True)\r\n elif nm == \"yeo_johonson\":\r\n _ndf = nm_yeoJohnson(cDF)\r\n tdf.insert(cCount, featureName, _ndf, True)\r\n elif nm == \"yeo_johonson_min_max\":\r\n _ndf = nm_yeoJohnson_minMax(cDF)\r\n tdf.insert(cCount, featureName, _ndf, True)\r\n elif nm == \"raw\":\r\n tdf.insert(cCount, featureName, cDF, True)\r\n else:\r\n print(\"ERROR: unavailable normalization method selected: \"+featureName)\r\n tdf.insert(cCount, featureName, cDF, True)\r\n return tdf\r\n\r\ndef nm_minMax(cDF):\r\n scaler = MinMaxScaler()\r\n _tf = scaler.fit_transform(cDF.values.reshape(-1, 1))\r\n return _tf\r\n\r\ndef nm_zScore(cDF):\r\n scaler = StandardScaler()\r\n _tf = scaler.fit_transform(cDF.values.reshape(-1, 1))\r\n return _tf\r\n\r\ndef nm_yeoJohnson(cDF):\r\n scaler = PowerTransformer(method='yeo-johnson')\r\n _tf = scaler.fit_transform(cDF.values.reshape(-1, 1))\r\n return _tf\r\n\r\ndef nm_yeoJohnson_minMax(cDF):\r\n _np = nm_yeoJohnson(cDF)\r\n _df = pd.DataFrame(_np)\r\n _tf = nm_minMax(_df)\r\n return _tf\r\n\r\n\r\ndef dataTransformation(tMethod, df, featureList, frontCols):\r\n print(\"Data transformation method: \"+tMethod)\r\n if tMethod == \"raw\":\r\n return df\r\n elif tMethod == \"min_max\":\r\n return dt_minMax(df, featureList, frontCols)\r\n elif tMethod == \"z_score\":\r\n return dt_zScore(df, featureList, frontCols)\r\n elif tMethod == \"yeo_johonson\":\r\n return dt_yeoJohnson(df, featureList, frontCols)\r\n elif tMethod == \"yeo_johonson_min_max\":\r\n return dt_yeoJohnson_minMax(df, featureList, frontCols)\r\n else:\r\n print(\"ERROR: unavailable data transformation method selected\")\r\n return df\r\n\r\ndef dt_minMax(df, featureList, frontCols):\r\n tfDF = df[frontCols]\r\n colCount = len(frontCols)-1\r\n for featureName in featureList:\r\n colCount = colCount+1\r\n colFeatDF = df[featureName]\r\n scaler = MinMaxScaler()\r\n _tf = scaler.fit_transform(colFeatDF.values.reshape(-1, 1))\r\n tfDF.insert(colCount, featureName, _tf, True)\r\n # tfDF[featureName] = _tf\r\n return tfDF\r\n\r\ndef dt_zScore(df, featureList, frontCols):\r\n tfDF = df[frontCols]\r\n colCount = len(frontCols)-1\r\n for featureName in featureList:\r\n colCount = colCount+1\r\n colFeatDF = df[featureName]\r\n scaler = StandardScaler()\r\n _tf = scaler.fit_transform(colFeatDF.values.reshape(-1, 1))\r\n tfDF.insert(colCount, featureName, _tf, True)\r\n # tfDF[featureName] = _tf\r\n return tfDF\r\n\r\ndef dt_yeoJohnson(df, featureList, frontCols):\r\n tfDF = df[frontCols]\r\n colCount = len(frontCols)-1\r\n for featureName in featureList:\r\n colCount = colCount+1\r\n colFeatDF = df[featureName]\r\n scaler = 
PowerTransformer(method='yeo-johnson')\r\n _tf = scaler.fit_transform(colFeatDF.values.reshape(-1, 1))\r\n tfDF.insert(colCount, featureName, _tf, True)\r\n # tfDF[featureName] = _tf\r\n return tfDF\r\n\r\ndef dt_yeoJohnson_minMax(df, featureList, frontCols):\r\n _df_1 = dt_yeoJohnson(df, featureList, frontCols)\r\n _df_2 = dt_minMax(_df_1, featureList, frontCols)\r\n return _df_2\r\n\r\ndef dataClustering(cMethod):\r\n print(\"Data clustering method: \"+cMethod)\r\n if cMethod == \"random_forest\":\r\n return dc_randomForest()\r\n elif cMethod == \"dbscan\":\r\n return dc_dbscan()\r\n elif cMethod == \"hdbscan\":\r\n return dc_hdbscan()\r\n elif cMethod == \"k_means\":\r\n return dc_kMeans()\r\n else:\r\n print(\"ERROR: unavailable data clustering method selected\")\r\n\r\ndef dc_randomForest():\r\n return 0\r\n\r\ndef dc_dbscan():\r\n return 0\r\n\r\ndef dc_hdbscan():\r\n return 0\r\n\r\ndef dc_kMeans():\r\n return 0\r\n\r\ndef dimensionReduction(drMethod, df, featureList):\r\n print(\"Dimension reduction method: \"+drMethod)\r\n if drMethod == \"MDS\":\r\n return dr_MDS(df, featureList)\r\n elif drMethod == \"PCA\":\r\n return dr_PCA(df, featureList)\r\n elif drMethod == \"ICA\":\r\n return dr_ICA(df, featureList)\r\n elif drMethod == \"t_SNE\":\r\n return dr_TSNE(df, featureList)\r\n elif drMethod == \"PLS\":\r\n return dr_PLS(df, featureList)\r\n else:\r\n print(\"ERROR: unavailable dimension reduction method selected\")\r\n return df[['x', 'y']]\r\n\r\ndef dr_MDS(df, featureList):\r\n drm = MDS(n_components=2, random_state=0)\r\n drDF = drm.fit_transform(df[featureList])\r\n return drDF\r\n\r\ndef dr_PCA(df, featureList):\r\n drm = PCA(n_components=2, random_state=0)\r\n drDF = drm.fit_transform(df[featureList])\r\n return drDF\r\n\r\ndef dr_ICA(df, featureList):\r\n drm = FastICA(n_components=2, random_state=0)\r\n drDF = drm.fit_transform(df[featureList])\r\n return drDF\r\n\r\ndef dr_TSNE(df, featureList):\r\n drm = TSNE(learning_rate=100, random_state=0)\r\n drDF = drm.fit_transform(df[featureList])\r\n return drDF\r\n\r\ndef dr_TSNE_preplexity(df, iteration, perplexity, featureList):\r\n drm = TSNE(n_components=2, early_exaggeration=iteration, perplexity=perplexity)\r\n drDF = drm.fit_transform(df[featureList], df[[\"label\"]])\r\n return drDF\r\n\r\ndef dr_PLS(df, featureList):\r\n drm = PLSRegression(n_components=2)\r\n drDF, _ = drm.fit_transform(df[featureList], df[\"label\"])\r\n return drDF\r\n\r\n################################\r\n# processing related functions #\r\n################################\r\ndef featureNameConverter(featName):\r\n cName = \"\"\r\n if featName == \"intensity\":\r\n cName = \"f0\"\r\n elif featName == \"color\":\r\n cName = \"f1\"\r\n elif featName == \"orientation\":\r\n cName = \"f2\"\r\n elif featName == \"curvature\":\r\n cName = \"f3\"\r\n elif featName == \"center_bias\":\r\n cName = \"f4\"\r\n elif featName == \"entropy_rate\":\r\n cName = \"f5\"\r\n elif featName == \"log_spectrum\":\r\n cName = \"f6\"\r\n elif featName == \"HOG\":\r\n cName = \"f7\"\r\n else:\r\n cName = \"f8\"\r\n return cName\r\n\r\ndef featureNameConverter_short(featName):\r\n cName = \"\"\r\n if featName == \"center_bias\":\r\n cName = \"center_b\"\r\n elif featName == \"entropy_rate\":\r\n cName = \"entropy_r\"\r\n elif featName == \"log_spectrum\":\r\n cName = \"log_s\"\r\n else:\r\n cName = featName\r\n return cName\r\n\r\n#######################################\r\n# scanpath analysis related functions #\r\n#######################################\r\ndef 
gridBasedTransform_style1(_scanpath, _stiWidth, _stiHeight):\r\n s = ''\r\n width = _stiWidth\r\n height = _stiHeight\r\n _mat = [['a', 'b', 'c', 'd', 'e'], ['f', 'g', 'h', 'i', 'j'], ['k', 'l', 'm', 'n', 'o'], ['p', 'q', 'r', 's', 't'], ['u', 'v', 'w', 'x', 'y']]\r\n wFactor = width/5\r\n hFactor = height/5\r\n for _fix in _scanpath:\r\n colIdx = int(_fix[0]/wFactor)\r\n rowIdx = int(_fix[1]/hFactor)\r\n s = s+_mat[rowIdx][colIdx]\r\n return s\r\n\r\ndef _c(ca,i,j,P,Q):\r\n if ca[i,j] > -1:\r\n return ca[i,j]\r\n elif i == 0 and j == 0:\r\n ca[i,j] = euc_dist(P[0],Q[0])\r\n elif i > 0 and j == 0:\r\n ca[i,j] = max(_c(ca,i-1,0,P,Q),euc_dist(P[i],Q[0]))\r\n elif i == 0 and j > 0:\r\n ca[i,j] = max(_c(ca,0,j-1,P,Q),euc_dist(P[0],Q[j]))\r\n elif i > 0 and j > 0:\r\n ca[i,j] = max(min(_c(ca,i-1,j,P,Q),_c(ca,i-1,j-1,P,Q),_c(ca,i,j-1,P,Q)),euc_dist(P[i],Q[j]))\r\n else:\r\n ca[i,j] = float(\"inf\")\r\n return ca[i,j]\r\n\r\ndef euc_dist(pt1, pt2):\r\n return math.sqrt((pt2[0]-pt1[0])*(pt2[0]-pt1[0])+(pt2[1]-pt1[1])*(pt2[1]-pt1[1]))\r\n\r\ndef makeFixationRange(_fixs, _rad, _stiWidth, _stiHeight):\r\n _WIDTH = _stiWidth-1\r\n _HEIGHT = _stiHeight-1\r\n _rFixs = []\r\n for _f in _fixs:\r\n _x = int(_f[0])\r\n _y = int(_f[1])\r\n _xMin = int(_x - _rad)\r\n _xMax = int(_x + _rad)\r\n _yMin = int(_y - _rad)\r\n _yMax = int(_y + _rad)\r\n if _xMin < 0:\r\n _xMin = 0\r\n if _xMax > _WIDTH:\r\n _xMax = _WIDTH\r\n if _yMin < 0:\r\n _yMin = 0\r\n if _yMax > _HEIGHT:\r\n _yMax = _HEIGHT\r\n for i in range(_xMin, _xMax):\r\n for j in range(_yMin, _yMax):\r\n _rFixs.append([i, j])\r\n tuplist = [tuple(x) for x in _rFixs]\r\n ans = list(OrderedDict.fromkeys(tuplist))\r\n return ans\r\n\r\ndef getIntersection(_f1, _f2):\r\n _iCount = 0\r\n for _p in _f1:\r\n _xp = _p[0]\r\n _yp = _p[1]\r\n for _f in _f2:\r\n _xf = _f[0]\r\n _yf = _f[1]\r\n if _xp == _xf and _yp == _yf:\r\n _iCount += 1\r\n return _iCount\r\n\r\ndef lcs(X, Y, m, n):\r\n if m == 0 or n == 0:\r\n return 0\r\n elif X[m-1] == Y[n-1]:\r\n return 1 + lcs(X, Y, m-1, n-1)\r\n else:\r\n return max(lcs(X, Y, m, n-1), lcs(X, Y, m-1, n))\r\n \r\ndef JaccardCoefficientDistance(_path1, _path2, radius, _stiWidth, _stiHeight):\r\n _fSet1 = makeFixationRange(_path1, radius, _stiWidth, _stiHeight)\r\n _fSet2 = makeFixationRange(_path2, radius, _stiWidth, _stiHeight)\r\n setIntersection = getIntersection(_fSet1, _fSet2)\r\n # print(len(_fSet1))\r\n # print(len(_fSet2))\r\n # print(setIntersection)\r\n ji = setIntersection/(len(_fSet1)+len(_fSet2)-setIntersection)\r\n return ji\r\n\r\ndef BoundingBodx():\r\n return 0\r\n\r\ndef DynamicTimeWarping(_path1, _path2):\r\n distance, path = fastdtw(_path1, _path2, dist=euclidean)\r\n return distance\r\n\r\ndef LongestCommonSubsequence(_path1, _path2, _stiWidth, _stiHeight):\r\n s1 = gridBasedTransform_style1(_path1, _stiWidth, _stiHeight)\r\n s2 = gridBasedTransform_style1(_path2, _stiWidth, _stiHeight)\r\n _v = lcs(s1, s2, len(s1), len(s2))\r\n return _v\r\n\r\ndef FreechetDistance(_path1, _path2):\r\n ca = np.ones((len(_path1), len(_path2)))\r\n ca = np.multiply(ca ,-1)\r\n dist = _c(ca, len(_path1)-1, len(_path2)-1, _path1, _path2)\r\n return dist\r\n\r\ndef EditDistance(_path1, _path2, _stiWidth, _stiHeight, debug=False):\r\n s1 = gridBasedTransform_style1(_path1, _stiWidth, _stiHeight)\r\n s2 = gridBasedTransform_style1(_path2, _stiWidth, _stiHeight)\r\n \r\n if len(s1) < len(s2):\r\n return EditDistance(_path2, _path1, _stiWidth, _stiHeight, debug)\r\n if len(s2) == 0:\r\n return len(s1)\r\n previous_row = 
range(len(s2) + 1)\r\n for i, c1 in enumerate(s1):\r\n current_row = [i + 1]\r\n for j, c2 in enumerate(s2):\r\n insertions = previous_row[j + 1] + 1\r\n deletions = current_row[j] + 1\r\n substitutions = previous_row[j] + (c1 != c2)\r\n current_row.append(min(insertions, deletions, substitutions))\r\n if debug:\r\n print(current_row[1:])\r\n previous_row = current_row\r\n return previous_row[-1]\r\n\r\ndef computeScanpathSimilarity(_method, _path1, _path2, _stiImg):\r\n stiHeight, stiWidth = _stiImg.shape[:2]\r\n simVal = 0\r\n if _method == 'jd':\r\n simVal = JaccardCoefficientDistance(_path1, _path2, PATCH_SIZE/2, stiWidth, stiHeight)\r\n elif _method == 'dtw':\r\n simVal = DynamicTimeWarping(_path1, _path2)\r\n elif _method == 'lcs':\r\n simVal = LongestCommonSubsequence(_path1, _path2, stiWidth, stiHeight)\r\n elif _method == 'fd':\r\n simVal = FreechetDistance(_path1, _path2)\r\n elif _method == 'ed':\r\n # simVal = EditDistance(_path1, _path2, True)\r\n simVal = EditDistance(_path1, _path2, stiWidth, stiHeight)\r\n elif _method == 'bb':\r\n print(_method)\r\n else:\r\n print(\"ERROR: wrong scanpath similarity calculation method selected\")\r\n return simVal\r\n\r\ndef IQRclusteringRange(Q1, Q3, val):\r\n clu = 0\r\n IQR = Q3 - Q1\r\n minimum = Q1-(1.5*IQR)\r\n maxmum = Q3+(1.5*IQR)\r\n\r\n if val < minimum:\r\n clu = 2\r\n elif val >= minimum and val < Q1:\r\n clu = 3\r\n elif val >= Q1 and val < Q3:\r\n clu = 4\r\n elif val >= Q3 and val < maxmum:\r\n clu = 5\r\n else:\r\n clu = 6\r\n return clu\r\n\r\n###########################\r\n# interaction update APIs #\r\n###########################\r\n@app.route('/api/brushParallelCoordinateChart/updateActives', methods=['POST'])\r\ndef brushParallelCoordinateChart_UpdateActives():\r\n print(\"brushParallelCoordinateChart_UpdateActives\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_INTERACTION_ACTIVES = request.form['actives']\r\n splitData = GET_INTERACTION_ACTIVES.split(\"/\")\r\n # print(\"splitData\")\r\n # print(splitData)\r\n userInteractions = []\r\n if splitData == ['0']:\r\n userInteractions.append([False, -999, -999])\r\n else:\r\n for act in splitData:\r\n actives = act.split(\"-\")[0]\r\n extents_s = act.split(\"-\")[1]\r\n extents_e = act.split(\"-\")[2]\r\n userInteractions.append([actives, extents_s, extents_e])\r\n \r\n makeJSON(\"./static/__cache__/pc_userInteractions.json\", userInteractions)\r\n response['status'] = 'success'\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = e\r\n print(e)\r\n return json.dumps(response)\r\n\r\n@app.route('/api/aggregation/selectedObserverDataAggregation', methods=['POST'])\r\ndef selectedObserverDataAggregation():\r\n print(\"selectedObserverDataAggregation\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_SELECTED_OBSERVER = request.form['observerInfo']\r\n splitData = GET_SELECTED_OBSERVER.split(\"/\")\r\n datsetName = splitData[0]\r\n # semanticClass = splitData[1]\r\n # imgFileName = splitData[2]\r\n # stimulusName = imgFileName.split(\".\")[0]\r\n # stimulusExe = imgFileName.split(\".\")[1]\r\n # stimulusDirName = stimulusName +\"_\"+ stimulusExe\r\n observerName = splitData[4]\r\n \r\n fixFilePathList = []\r\n stiNameList = []\r\n fixPath = \"./static/fix/\"+ datsetName +\"/\"\r\n semanticList = os.listdir(fixPath)\r\n for semanticDir in semanticList:\r\n stiDirPath = fixPath + semanticDir +\"/\"\r\n stiDirList = os.listdir(stiDirPath)\r\n sFixList = []\r\n sStiNameList = []\r\n for stiDir in stiDirList:\r\n obFixPath 
= stiDirPath + stiDir +\"/\"+ observerName +\".csv\"\r\n if os.path.exists(obFixPath):\r\n sFixList.append(obFixPath)\r\n sStiNameList.append([semanticDir, stiDir.split(\"_\")[0]])\r\n fixFilePathList.append(sFixList)\r\n stiNameList.append(sStiNameList)\r\n\r\n gtList = []\r\n for i in range(0, len(stiNameList)):\r\n gts = []\r\n for j in range(0, len(stiNameList[i])):\r\n semanticDir = stiNameList[i][j][0]\r\n stiName = stiNameList[i][j][1]\r\n gtPath = \"./static/ground_truth/\"+ datsetName +\"/\"+ semanticDir +\"/\"+ stiName +\".jpg\"\r\n if not(os.path.exists(gtPath)):\r\n print(\"ERROR: NO STIMULUS IMAGE FILE: \"+gtPath)\r\n gt = cv2.imread(gtPath)\r\n gts.append(gt)\r\n gtList.append(gts)\r\n\r\n fixCountList = []\r\n for i in range(0, len(fixFilePathList)):\r\n fixCounts = []\r\n for j in range(0, len(fixFilePathList[i])):\r\n obFixFilePath = fixFilePathList[i][j]\r\n # print(obFixFilePath)\r\n scDir = obFixFilePath.split(\"/\")[4]\r\n imgDir = obFixFilePath.split(\"/\")[5]\r\n df = pd.read_csv(obFixFilePath, header=None)\r\n l0 = 0\r\n l1 = 0\r\n _tList = []\r\n for _fp in df.values.tolist():\r\n _x = int(_fp[0])\r\n _y = int(_fp[1])\r\n _t = float(_fp[2])\r\n _tList.append(_t)\r\n _label = label_groundTruthFixationMap(gtList[i][j], _x, _y)\r\n if _label == 0:\r\n l0 = l0+1\r\n else:\r\n l1 = l1+1\r\n fixCounts.append([scDir, imgDir, l0, l1, _tList])\r\n fixCountList.append(fixCounts)\r\n\r\n response['status'] = 'success'\r\n response['obFixCountList'] = fixCountList\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = e\r\n print(e)\r\n return json.dumps(response)\r\n\r\n@app.route('/api/multiPatchVisualization/selectDivUpdate', methods=['POST'])\r\ndef multiPatchVisualization_selectDivUpdate():\r\n print(\"multiPatchVisualization_selectDivUpdate\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n SELECTED_DIV_CACHE_PATH = request.form['cachePath']\r\n splitPath = SELECTED_DIV_CACHE_PATH.split(\"-\")\r\n get_dataset = splitPath[0].split(\"/cache_\")[1]\r\n get_semantic = splitPath[1]\r\n get_stimulus = splitPath[2]\r\n get_data_transformation = splitPath[3]\r\n get_dimension_reduction = splitPath[4]\r\n\r\n slectedDivData = [get_dataset, get_semantic, get_stimulus, get_data_transformation, get_dimension_reduction]\r\n makeJSON(\"./static/__cache__/select_div.json\", slectedDivData)\r\n\r\n response['status'] = 'success'\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = e\r\n print(e)\r\n return json.dumps(response)\r\n\r\n@app.route('/api/horizontalBarChart/selectObserverUpdate', methods=['POST'])\r\ndef horizontalBarChart_selectObserverUpdate():\r\n print(\"horizontalBarChart_selectObserverUpdate\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_OBSERVER_INFO = request.form['selectedObserver']\r\n SPLIT_OBSERVER_INFO = GET_OBSERVER_INFO.split(\"/\")\r\n obID = SPLIT_OBSERVER_INFO[0]\r\n obName = SPLIT_OBSERVER_INFO[1]\r\n obIndex = SPLIT_OBSERVER_INFO[2]\r\n \r\n slectedObData = [obID, obName, obIndex]\r\n makeJSON(\"./static/__cache__/select_ob.json\", slectedObData)\r\n\r\n response['status'] = 'success'\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = e\r\n print(e)\r\n return json.dumps(response)\r\n\r\n\r\n\r\n\r\n\r\n##################################\r\n# patch clustering analysis APIs #\r\n##################################\r\n@app.route('/api/clustering/loadCacheList', methods=['POST'])\r\ndef clustering_loadCacheList():\r\n 
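\r\n # [editor's note] every endpoint in this file follows one contract: a form-encoded POST answered by json.dumps of a dict with 'status' ('success' or 'failed', plus 'reason' on failure). A hypothetical client call for this endpoint, assuming the dev server above and the third-party requests package (not otherwise used here):\r\n # import requests\r\n # r = requests.post(\"http://localhost:5000/api/clustering/loadCacheList\", data={})\r\n # assert r.json()[\"status\"] in (\"success\", \"failed\")\r\n 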
print(\"clustering_loadCacheList\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n cacheDirPath = \"./static/__cache__/\"\r\n filesInDir = os.listdir(cacheDirPath)\r\n cacheFileList = []\r\n for fileName in filesInDir:\r\n fileType = fileName.split(\"_\")[0]\r\n if fileType == \"cache\":\r\n cacheFileList.append(fileName)\r\n\r\n response['status'] = 'success'\r\n response['caches'] = cacheFileList\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = e\r\n print(e)\r\n return json.dumps(response)\r\n\r\n@app.route('/api/clustering/processingMultiParms', methods=['POST'])\r\ndef clustering_processingMultiParams():\r\n print(\"clustering_processingMultiParams\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_SELECTED_STIMULUS_INFO_STR = request.form['selectedStimulus']\r\n dataTransformationMethod = \"yeo_johonson_min_max\"\r\n dimensionReductionMethod = \"t_SNE\"\r\n GET_SELECTED_STIMULUS_INFO = GET_SELECTED_STIMULUS_INFO_STR.split(\"-\")\r\n print(PARTICIPANT)\r\n print(GET_SELECTED_STIMULUS_INFO)\r\n\r\n datasetName = GET_SELECTED_STIMULUS_INFO[0].split(\"/\")[0]\r\n semanticClassName = GET_SELECTED_STIMULUS_INFO[0].split(\"/\")[1]\r\n stimulusFileName = GET_SELECTED_STIMULUS_INFO[0].split(\"/\")[2]\r\n stimulusName = stimulusFileName.split(\".\")[0]\r\n stimulusExe = stimulusFileName.split(\".\")[1]\r\n stimulusDirName = stimulusName +\"_\"+ stimulusExe\r\n \r\n groundTruthPath = \"./static/ground_truth/\" + datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusName +\".jpg\"\r\n groundTruthFixMap = cv2.imread(groundTruthPath)\r\n generate_discrete_groundTruthFixationMap(groundTruthFixMap)\r\n fmHeight, fmWidth = groundTruthFixMap.shape[:2]\r\n\r\n featureDirPath = \"./static/feature/\"\r\n featureDFList = []\r\n for _f in FEATURE_ordered:\r\n featureFilePath = featureDirPath + _f +\"/\"+ datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusName +\".csv\"\r\n featureDF = pd.read_csv(featureFilePath, header=None)\r\n featureDFList.append(featureDF)\r\n \r\n featureDirPath = \"./static/feature/\"\r\n featureDFList = []\r\n for _f in FEATURE_ordered:\r\n featureFilePath = featureDirPath + _f +\"/\"+ datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusName +\".csv\"\r\n featureDF = pd.read_csv(featureFilePath)\r\n featureDFList.append(featureDF)\r\n\r\n PARTICIPANT_LIST = []\r\n for obInfo in PARTICIPANT:\r\n _dataName = obInfo.split(\"/\")[0]\r\n _className = obInfo.split(\"/\")[1]\r\n _stiNameDir = obInfo.split(\"/\")[2]\r\n if datasetName == _dataName and semanticClassName == _className and stimulusDirName == _stiNameDir:\r\n PARTICIPANT_LIST.append(obInfo)\r\n\r\n fixDirPath = \"./static/fix/\"\r\n rawDataList = []\r\n aggregatedDataList = []\r\n for observer in PARTICIPANT_LIST:\r\n userId = observer.split(\"/\")[3]\r\n fixFilePath = fixDirPath + datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusDirName +\"/\"+ userId+\".csv\"\r\n ob = datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusDirName +\"/\"+ userId\r\n fixDF = pd.read_csv(fixFilePath, header=None)\r\n fixList = fixDF.values.tolist()\r\n for _fp in fixList:\r\n _x = int(_fp[0])\r\n _y = int(_fp[1])\r\n _t = float(_fp[2])\r\n _label = label_groundTruthFixationMap(groundTruthFixMap, _x, _y)\r\n rawDataList.append([ob, _x, _y, _label, _t])\r\n _midStack = [ob, _x, _y, _label]\r\n for i in range(0, len(FEATURE_ordered)):\r\n fMean = getFeatureMeanVal(featureDFList[i], _x, _y, fmWidth, fmHeight, PATCH_SIZE)\r\n _midStack.append(fMean)\r\n _midStack.append(_t)\r\n 
aggregatedDataList.append(_midStack)\r\n \r\n dfCols = [\"id\", \"x\", \"y\", \"label\"]\r\n dfCols_full = [\"id\", \"x\", \"y\", \"label\"]\r\n for featName in FEATURE_ordered:\r\n dfCols.append(featName)\r\n dfCols_full.append(featName)\r\n dfCols_full.append(\"duration\")\r\n aggDF = pd.DataFrame(aggregatedDataList, columns=dfCols_full)\r\n \r\n # data transformation\r\n dfFrontCols = [\"id\", \"x\", \"y\", \"label\"]\r\n tfDF = dataTransformation(dataTransformationMethod, aggDF[dfCols], FEATURE_ordered, dfFrontCols)\r\n\r\n cacheFilePathList = []\r\n patchProcessDataLists = []\r\n # dimension reduction\r\n for iteration in [12, 15, 20, 25, 30]:\r\n for perplexity in [30, 50, 70, 100, 200]:\r\n dr = dr_TSNE_preplexity(tfDF, iteration, perplexity, FEATURE_ordered)\r\n drDF = pd.DataFrame(dr, columns=['x', 'y'])\r\n indexCount = 0\r\n processedDF = pd.DataFrame(aggDF['id'].values.tolist(), columns=['id'])\r\n indexCount = indexCount+1\r\n processedDF.insert(indexCount, \"x\", drDF['x'].values.tolist(), True)\r\n indexCount = indexCount+1\r\n processedDF.insert(indexCount, \"y\", drDF['y'].values.tolist(), True)\r\n indexCount = indexCount+1\r\n processedDF.insert(indexCount, \"label\", aggDF['label'].values.tolist(), True)\r\n indexCount = indexCount+1\r\n for featName in FEATURE_ordered:\r\n processedDF.insert(indexCount, featName, tfDF[featName].values.tolist(), True)\r\n indexCount = indexCount+1\r\n processedDF.insert(indexCount, \"duration\", aggDF['duration'].values.tolist(), True)\r\n processedDataList = processedDF.values.tolist()\r\n patchProcessDataLists.append(processedDataList)\r\n cacheFilePath = \"./static/__cache__/pcache/cache_\"+ datasetName +\"-\"+ semanticClassName +\"-\"+ stimulusDirName +\"-\"+ dataTransformationMethod +\"-\"+ dimensionReductionMethod +\"-\"+ str(len(PARTICIPANT)) +\".csv\"\r\n cacheFilePathList.append([cacheFilePath, True])\r\n\r\n response['status'] = 'success'\r\n response['processingData'] = patchProcessDataLists\r\n response['cacheFilePath'] = cacheFilePathList\r\n response['rawDataList'] = rawDataList\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = e\r\n print(e)\r\n return json.dumps(response)\r\n\r\n@app.route('/api/clustering/processingMulti', methods=['POST'])\r\ndef clustering_processingMulti():\r\n print(\"clustering_processingMulti\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_TRANSFORMATION_METHOD_STR = request.form['transformationMethod']\r\n GET_DIMEN_REDUCTION_METHOD_STR = request.form['dimensionReductionMethod']\r\n GET_SELECTED_STIMULUS_INFO_STR = request.form['selectedStimulus']\r\n\r\n GET_SELECTED_STIMULUS_INFO = GET_SELECTED_STIMULUS_INFO_STR.split(\"-\")\r\n GET_TRANSFORMATION_METHOD = GET_TRANSFORMATION_METHOD_STR.split(\"/\")\r\n GET_DIMEN_REDUCTION_METHOD = GET_DIMEN_REDUCTION_METHOD_STR.split(\"/\")\r\n print(GET_TRANSFORMATION_METHOD)\r\n print(GET_DIMEN_REDUCTION_METHOD)\r\n print(PARTICIPANT)\r\n print(GET_SELECTED_STIMULUS_INFO)\r\n\r\n datasetName = GET_SELECTED_STIMULUS_INFO[0].split(\"/\")[0]\r\n semanticClassName = GET_SELECTED_STIMULUS_INFO[0].split(\"/\")[1]\r\n stimulusFileName = GET_SELECTED_STIMULUS_INFO[0].split(\"/\")[2]\r\n stimulusName = stimulusFileName.split(\".\")[0]\r\n stimulusExe = stimulusFileName.split(\".\")[1]\r\n stimulusDirName = stimulusName +\"_\"+ stimulusExe\r\n \r\n groundTruthPath = \"./static/ground_truth/\" + datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusName +\".jpg\"\r\n groundTruthFixMap = cv2.imread(groundTruthPath)\r\n 
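\r\n # [editor's note] cache files below are keyed as cache_<dataset>-<class>-<stimulusDir>-<transformation>-<dimReduction>-<participantCount>.csv, so a cached projection is reused only when all five parts match; changing the participant selection changes the count and forces a recompute.\r\n 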
generate_discrete_groundTruthFixationMap(groundTruthFixMap)\r\n fmHeight, fmWidth = groundTruthFixMap.shape[:2]\r\n\r\n cacheFilePathList = []\r\n pExistsFlag = True\r\n for dtm in GET_TRANSFORMATION_METHOD:\r\n for drm in GET_DIMEN_REDUCTION_METHOD:\r\n cacheFilePath = \"./static/__cache__/pcache/cache_\"+ datasetName +\"-\"+ semanticClassName +\"-\"+ stimulusDirName +\"-\"+ dtm +\"-\"+ drm +\"-\"+ str(len(PARTICIPANT)) +\".csv\"\r\n existsFlag = True\r\n if not(os.path.exists(cacheFilePath)):\r\n existsFlag = False\r\n pExistsFlag = False\r\n cacheFilePathList.append([cacheFilePath, existsFlag])\r\n \r\n patchProcessDataLists = []\r\n rawDataList = []\r\n if pExistsFlag == True:\r\n print(\"All cache files exist\")\r\n for cPath in cacheFilePathList:\r\n p = cPath[0]\r\n aggDF = pd.read_csv(p)\r\n patchProcessDataLists.append(aggDF.values.tolist())\r\n fixPath = \"./static/fix/\" + datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusDirName +\"/\"\r\n fixFileList = os.listdir(fixPath)\r\n for fileName in fixFileList:\r\n ffPath = fixPath + fileName\r\n fixDF = pd.read_csv(ffPath, header=None)\r\n fixList = fixDF.values.tolist()\r\n for _p in fixList:\r\n _x = int(_p[0])\r\n _y = int(_p[1])\r\n _t = float(_p[2])\r\n _label = label_groundTruthFixationMap(groundTruthFixMap, _x, _y)\r\n rawDataList.append([datasetName+\"/\"+semanticClassName+\"/\"+stimulusDirName+\"/\"+fileName.split(\".\")[0], _x, _y, _label, _t])\r\n else:\r\n print(\"Some cache files do not exist\")\r\n for cPath in cacheFilePathList:\r\n p = cPath[0]\r\n eFlag = cPath[1]\r\n if eFlag == True:\r\n aggDF = pd.read_csv(p)\r\n patchProcessDataLists.append(aggDF.values.tolist())\r\n else:\r\n fixDirPath = \"./static/fix/\" + datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusDirName +\"/\"\r\n featureDirPath = \"./static/feature/\"\r\n featureDFList = []\r\n for _f in FEATURE_ordered:\r\n featureFilePath = featureDirPath + _f +\"/\"+ datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusName +\".csv\"\r\n featureDF = pd.read_csv(featureFilePath, header=None)\r\n featureDFList.append(featureDF)\r\n PARTICIPANT_LIST = []\r\n \r\n for obInfo in PARTICIPANT:\r\n _dataName = obInfo.split(\"/\")[0]\r\n _className = obInfo.split(\"/\")[1]\r\n _stiNameDir = obInfo.split(\"/\")[2]\r\n if datasetName == _dataName and semanticClassName == _className and stimulusDirName == _stiNameDir:\r\n PARTICIPANT_LIST.append(obInfo)\r\n \r\n aggregatedDataList = []\r\n for observer in PARTICIPANT_LIST:\r\n userId = observer.split(\"/\")[3]\r\n fixFilePath = fixDirPath + userId +\".csv\" # fixDirPath already ends with dataset/class/stimulus, so only the user file name is appended (previously the full path was concatenated twice)\r\n ob = datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusDirName +\"/\"+ userId\r\n fixDF = pd.read_csv(fixFilePath, header=None)\r\n fixList = fixDF.values.tolist()\r\n for _fp in fixList:\r\n _x = int(_fp[0])\r\n _y = int(_fp[1])\r\n _t = float(_fp[2])\r\n _label = label_groundTruthFixationMap(groundTruthFixMap, _x, _y)\r\n rawDataList.append([ob, _x, _y, _label, _t])\r\n _midStack = [ob, _x, _y, _label]\r\n for i in range(0, len(FEATURE_ordered)):\r\n fMean = getFeatureMeanVal(featureDFList[i], _x, _y, fmWidth, fmHeight, PATCH_SIZE)\r\n _midStack.append(fMean)\r\n aggregatedDataList.append(_midStack)\r\n dfCols = [\"id\", \"x\", \"y\", \"label\"]\r\n for featName in FEATURE_ordered:\r\n dfCols.append(featName)\r\n aggDF = pd.DataFrame(aggregatedDataList, columns=dfCols)\r\n\r\n\r\n # data transformation\r\n dtm = p.split(\"-\")[3]\r\n drm = p.split(\"-\")[4]\r\n dfFrontCols = [\"id\", 
\"x\", \"y\", \"label\"]\r\n tfDF = dataTransformation(dtm, aggDF, FEATURE_ordered, dfFrontCols)\r\n # dimension reduction\r\n dr = dimensionReduction(drm, tfDF, FEATURE_ordered)\r\n drDF = pd.DataFrame(dr, columns=['x', 'y'])\r\n \r\n indexCount = 0\r\n processedDF = pd.DataFrame(aggDF['id'].values.tolist(), columns=['id'])\r\n indexCount = indexCount+1\r\n processedDF.insert(indexCount, \"x\", drDF['x'].values.tolist(), True)\r\n indexCount = indexCount+1\r\n processedDF.insert(indexCount, \"y\", drDF['y'].values.tolist(), True)\r\n indexCount = indexCount+1\r\n processedDF.insert(indexCount, \"label\", aggDF['label'].values.tolist(), True)\r\n indexCount = indexCount+1\r\n for featName in FEATURE_ordered:\r\n processedDF.insert(indexCount, featName, tfDF[featName].values.tolist(), True)\r\n indexCount = indexCount+1\r\n\r\n processedDF.to_csv(p, mode='w', index=False, header=True)\r\n processedDataList = processedDF.values.tolist()\r\n patchProcessDataLists.append(processedDataList)\r\n if os.path.exists(p):\r\n cPath[1] = True\r\n\r\n response['status'] = 'success'\r\n response['processingData'] = patchProcessDataLists\r\n response['cacheFilePath'] = cacheFilePathList\r\n response['rawDataList'] = rawDataList\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = e\r\n print(e)\r\n return json.dumps(response)\r\n\r\n@app.route('/api/clustering/processing', methods=['POST'])\r\ndef clustering_processing():\r\n print(\"clustering_processing\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_USE_CACHE_FLAG = request.form['cacheUseFlag']\r\n GET_TRANSFORMATION_METHOD = request.form['transformationMethod']\r\n GET_DIMEN_REDUCTION_METHOD = request.form['dimensionReductionMethod']\r\n GET_SELECTED_STIMULUS_INFO_STR = request.form['selectedStimulus']\r\n GET_SELECTED_STIMULUS_INFO = GET_SELECTED_STIMULUS_INFO_STR.split(\"-\")\r\n print(GET_USE_CACHE_FLAG)\r\n print(GET_TRANSFORMATION_METHOD)\r\n print(GET_DIMEN_REDUCTION_METHOD)\r\n print(PARTICIPANT)\r\n print(GET_SELECTED_STIMULUS_INFO)\r\n \r\n cacheFilePath = \"\"\r\n fixDirPath = \"./static/fix/\"\r\n featureDirPath = \"./static/feature/\"\r\n groundTruthDirPath = \"./static/ground_truth/\"\r\n aggregatedDataList = []\r\n \r\n rawDataList = []\r\n if GET_USE_CACHE_FLAG == \"use\":\r\n print(\"Use cache flag on: \"+cacheFilePath)\r\n stiInfo = GET_SELECTED_STIMULUS_INFO[0]\r\n \r\n dataName = stiInfo.split(\"/\")[0]\r\n className = stiInfo.split(\"/\")[1]\r\n stiFileName = stiInfo.split(\"/\")[2]\r\n stiName = stiFileName.split(\".\")[0]\r\n stiExe = stiFileName.split(\".\")[1]\r\n stiNameDir = stiName +\"_\"+ stiExe\r\n cacheFilePath = \"./static/__cache__/pcache/cache_\"+ dataName +\"-\"+ className +\"-\"+ stiNameDir +\"-\"+ GET_TRANSFORMATION_METHOD +\"-\"+ GET_DIMEN_REDUCTION_METHOD +\"-\"+ str(len(PARTICIPANT)) +\".csv\"\r\n\r\n gtFixMapPath = groundTruthDirPath + dataName +\"/\"+ className +\"/\"+ stiName +\".jpg\"\r\n groundTruthFixMap = cv2.imread(gtFixMapPath)\r\n generate_discrete_groundTruthFixationMap(groundTruthFixMap)\r\n fmHeight, fmWidth = groundTruthFixMap.shape[:2]\r\n\r\n \r\n fixPath = fixDirPath + dataName +\"/\"+ className +\"/\"+ stiNameDir + \"/\"\r\n fixDataNameList = os.listdir(fixPath)\r\n for fileName in fixDataNameList:\r\n ffPath = fixPath+fileName\r\n fixDF = pd.read_csv(ffPath, header=None)\r\n fixList = fixDF.values.tolist()\r\n # print(fixFilePath)\r\n for _fp in fixList:\r\n _x = int(_fp[0])\r\n _y = int(_fp[1])\r\n _t = float(_fp[2])\r\n _label = 
label_groundTruthFixationMap(groundTruthFixMap, _x, _y)\r\n rawDataList.append([dataName+\"/\"+className+\"/\"+stiNameDir+\"/\"+fileName.split(\".\")[0] ,_x, _y, _label, _t])\r\n else:\r\n for stiInfo in GET_SELECTED_STIMULUS_INFO:\r\n dataName = stiInfo.split(\"/\")[0]\r\n className = stiInfo.split(\"/\")[1]\r\n stiFileName = stiInfo.split(\"/\")[2]\r\n stiName = stiFileName.split(\".\")[0]\r\n stiExe = stiFileName.split(\".\")[1]\r\n stiNameDir = stiName +\"_\"+ stiExe\r\n \r\n cacheFilePath = \"./static/__cache__/pcache/cache_\"+ dataName +\"-\"+ className +\"-\"+ stiNameDir +\"-\"+ GET_TRANSFORMATION_METHOD +\"-\"+ GET_DIMEN_REDUCTION_METHOD +\"-\"+ str(len(PARTICIPANT)) +\".csv\"\r\n \r\n gtFixMapPath = groundTruthDirPath + dataName +\"/\"+ className +\"/\"+ stiName +\".jpg\"\r\n groundTruthFixMap = cv2.imread(gtFixMapPath)\r\n generate_discrete_groundTruthFixationMap(groundTruthFixMap)\r\n fmHeight, fmWidth = groundTruthFixMap.shape[:2]\r\n featureDFList = []\r\n for _f in FEATURE_ordered:\r\n featureFilePath = featureDirPath + _f +\"/\"+ dataName +\"/\"+ className +\"/\"+ stiName +\".csv\"\r\n featureDF = pd.read_csv(featureFilePath, header=None)\r\n featureDFList.append(featureDF)\r\n\r\n PARTICIPANT_LIST = []\r\n for obInfo in PARTICIPANT:\r\n _dataName = obInfo.split(\"/\")[0]\r\n _className = obInfo.split(\"/\")[1]\r\n _stiNameDir = obInfo.split(\"/\")[2]\r\n if dataName == _dataName and className == _className and stiNameDir == _stiNameDir:\r\n PARTICIPANT_LIST.append(obInfo)\r\n\r\n for observer in PARTICIPANT_LIST:\r\n # stiExt = stiNameDir.split(\"_\")[1]\r\n userId = observer.split(\"/\")[3]\r\n fixFilePath = fixDirPath + dataName +\"/\"+ className +\"/\"+ stiNameDir +\"/\"+ userId+\".csv\"\r\n ob = dataName +\"/\"+ className +\"/\"+ stiNameDir +\"/\"+ userId\r\n fixDF = pd.read_csv(fixFilePath, header=None)\r\n fixList = fixDF.values.tolist()\r\n # print(fixFilePath)\r\n for _fp in fixList:\r\n _x = int(_fp[0])\r\n _y = int(_fp[1])\r\n _t = float(_fp[2])\r\n _label = label_groundTruthFixationMap(groundTruthFixMap, _x, _y)\r\n rawDataList.append([ob, _x, _y, _label, _t])\r\n _midStack = [ob, _x, _y, _label]\r\n for i in range(0, len(FEATURE_ordered)):\r\n fMean = getFeatureMeanVal(featureDFList[i], _x, _y, fmWidth, fmHeight, PATCH_SIZE)\r\n _midStack.append(fMean)\r\n _midStack.append(_t)\r\n aggregatedDataList.append(_midStack)\r\n \r\n aggDF = []\r\n if GET_USE_CACHE_FLAG == \"use\":\r\n aggDF = pd.read_csv(cacheFilePath)\r\n else:\r\n dfCols = [\"id\", \"x\", \"y\", \"label\"]\r\n for featName in FEATURE_ordered:\r\n dfCols.append(featName)\r\n dfCols.append(\"duration\")\r\n aggDF = pd.DataFrame(aggregatedDataList, columns=dfCols)\r\n \r\n\r\n # data transformation\r\n dfFrontCols = [\"id\", \"x\", \"y\", \"label\"]\r\n tfDF = dataTransformation(GET_TRANSFORMATION_METHOD, aggDF, FEATURE_ordered, dfFrontCols)\r\n # dimension reduction\r\n dr = dimensionReduction(GET_DIMEN_REDUCTION_METHOD, tfDF, FEATURE_ordered)\r\n drDF = pd.DataFrame(dr, columns=['x', 'y'])\r\n \r\n indexCount = 0\r\n processedDF = pd.DataFrame(aggDF['id'].values.tolist(), columns=['id'])\r\n indexCount = indexCount+1\r\n processedDF.insert(indexCount, \"x\", drDF['x'].values.tolist(), True)\r\n indexCount = indexCount+1\r\n processedDF.insert(indexCount, \"y\", drDF['y'].values.tolist(), True)\r\n indexCount = indexCount+1\r\n processedDF.insert(indexCount, \"label\", aggDF['label'].values.tolist(), True)\r\n indexCount = indexCount+1\r\n for featName in FEATURE_ordered:\r\n 
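\r\n # [editor's note] the loop below appends one column per transformed feature, giving the final cache/response schema: id, x, y (the 2-D projection), label, the 8 feature columns in FEATURE_ordered order, then duration.\r\n 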
processedDF.insert(indexCount, featName, tfDF[featName].values.tolist(), True)\r\n indexCount = indexCount+1\r\n processedDF.insert(indexCount, \"duration\", aggDF['duration'].values.tolist(), True)\r\n\r\n dataColumns = processedDF.columns.values.tolist()\r\n processedDataList = processedDF.values.tolist()\r\n processedDF.to_csv(cacheFilePath, mode='w', index=False, header=True)\r\n\r\n response['status'] = 'success'\r\n response['dataColumns'] = dataColumns\r\n response['processingData'] = processedDataList\r\n response['cacheFilePath'] = cacheFilePath.split(\".\")[1]+\".csv\"\r\n response['rawDataList'] = rawDataList\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = str(e)\r\n print(e)\r\n return json.dumps(response)\r\n\r\n\r\n\r\n##########################\r\n# scanpath analysis APIs #\r\n##########################\r\n@app.route('/api/scanpath/calcSimilarity', methods=['POST'])\r\ndef scanpath_calc_similarity():\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_SELECTED_SCANPATH_SIMILARITY_METHOD = request.form['scanpathSimilarityMethod']\r\n GET_SELECTED_SCANPATH_FILES = request.form['selectedScanpaths']\r\n splitSelectedScanpathFiles = GET_SELECTED_SCANPATH_FILES.split(\"-\")\r\n GET_SELECTED_MAIN_SCANPATH = request.form['mainScanpath']\r\n\r\n print(GET_SELECTED_SCANPATH_SIMILARITY_METHOD)\r\n # print(GET_SELECTED_SCANPATH_FILES)\r\n print(splitSelectedScanpathFiles)\r\n print(GET_SELECTED_MAIN_SCANPATH)\r\n\r\n # load scanpath files\r\n fixationDirPath = \"./static/fix/\"\r\n scanpathList = []\r\n for _fixFile in splitSelectedScanpathFiles:\r\n mainFlag = False\r\n if _fixFile == GET_SELECTED_MAIN_SCANPATH:\r\n mainFlag = True\r\n filePath = fixationDirPath + _fixFile +\".csv\"\r\n fixDF = pd.read_csv(filePath, header=None)\r\n fixList = fixDF.values.tolist()\r\n scanpathList.append([mainFlag, _fixFile, fixList])\r\n \r\n # check main scanpath and save it\r\n # check fixations from same visual stimulus\r\n mainScanpathIndex = 0\r\n mainScanpath = []\r\n # prevStiName = \"\"\r\n diffStiFlag = False\r\n for i in range(0, len(scanpathList)):\r\n if scanpathList[i][0] == True:\r\n mainScanpathIndex = i\r\n mainScanpath = scanpathList[i]\r\n # curStiName = scanpathList[i][1].split(\"/\")[2]\r\n # if i==0:\r\n # prevStiName = curStiName\r\n # else:\r\n # if prevStiName != curStiName:\r\n # diffStiFlag = True\r\n # prevStiName = curStiName\r\n \r\n # calculate scanpath similarity\r\n stiPath = \"\"\r\n if diffStiFlag == False:\r\n splitValue = mainScanpath[1].split(\"/\")\r\n datasetName = splitValue[0]\r\n className = splitValue[1]\r\n splitStiName = splitValue[2].split(\"_\")\r\n stiName = \"\"\r\n if len(splitStiName) == 2:\r\n stiName = splitStiName[0] +\".\"+ splitStiName[1]\r\n else:\r\n for i in range(0, len(splitStiName)):\r\n if i == len(splitStiName)-1:\r\n stiName = stiName +\".\"+ splitStiName[i]\r\n else:\r\n if stiName == \"\":\r\n stiName = splitStiName[i]\r\n else:\r\n stiName = stiName +\"_\"+ splitStiName[i]\r\n stiPath = \"./static/stimulus/\"+ datasetName +\"/\"+ className +\"/\"+ stiName\r\n # print(\"stimulus path: \")\r\n # print(stiPath)\r\n scanpathSimilarityValueList = []\r\n stiImage = cv2.imread(stiPath)\r\n for i in range(0, len(scanpathList)):\r\n if i == mainScanpathIndex:\r\n continue\r\n targetScanpath = scanpathList[i][2]\r\n sv = computeScanpathSimilarity(GET_SELECTED_SCANPATH_SIMILARITY_METHOD, mainScanpath[2], targetScanpath, stiImage)\r\n scanpathSimilarityValueList.append([mainScanpath[1], 
scanpathList[i][1], sv])\r\n # print(\"Similarity between main and \"+str(i)+\" scanpath: \"+str(sv))\r\n \r\n # get scanpath similarity calculation results\r\n similarityValueList = []\r\n for simData in scanpathSimilarityValueList:\r\n similarityValueList.append(simData[2])\r\n \r\n # clustering with IQR\r\n svlSeries = pd.Series(similarityValueList)\r\n Q1 = svlSeries.quantile(.25)\r\n Q3 = svlSeries.quantile(.75)\r\n\r\n similarityBaseClusteringIQR = []\r\n for i in range(0, len(scanpathSimilarityValueList)):\r\n _s = similarityValueList[i]\r\n simClu = IQRclusteringRange(Q1, Q3, _s)\r\n similarityBaseClusteringIQR.append({'main':scanpathSimilarityValueList[i][0], 'target':scanpathSimilarityValueList[i][1], 'svalue':scanpathSimilarityValueList[i][2], 'sclu':simClu})\r\n\r\n \r\n response['status'] = 'success'\r\n response['scanpathSimilarityValues'] = similarityBaseClusteringIQR\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = str(e)\r\n print(e)\r\n return json.dumps(response)\r\n\r\n###################\r\n# processing APIs #\r\n###################\r\n@app.route('/api/processing/genFixationDataList', methods=['POST'])\r\ndef processing_gen_fixationDataList():\r\n print(\"processing_gen_fixationDataList\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_PARTICIPANT = request.form['participantList']\r\n split_get_participant = GET_PARTICIPANT.split(\"-\")\r\n\r\n # generate fixations (scanpath) data list\r\n PARTICIPANT_FIX_FILE_LIST = []\r\n for _participant in split_get_participant:\r\n split_pData = _participant.split(\"/\")\r\n sDataset = split_pData[0]\r\n sSemanticClass = split_pData[1]\r\n sFixDirName = split_pData[2]\r\n sParticipantFixFileName = split_pData[3] +\".csv\"\r\n fixFilePath = \"./static/fix/\"+ sDataset +\"/\"+ sSemanticClass +\"/\"+ sFixDirName +\"/\"+ sParticipantFixFileName\r\n if not(os.path.exists(fixFilePath)):\r\n continue\r\n PARTICIPANT_FIX_FILE_LIST.append([split_pData, fixFilePath])\r\n FIX_DATA_LIST = []\r\n for fixFilePath in PARTICIPANT_FIX_FILE_LIST:\r\n _id = fixFilePath[0]\r\n _path = fixFilePath[1]\r\n df = pd.read_csv(_path, header=None)\r\n dfList = df.values.tolist()\r\n FIX_DATA_LIST.append([_id, dfList])\r\n\r\n # generate aggregated fixation patches image\r\n fixatedPatchPath = \"./static/output_image_patch/\"\r\n patchPos = 0\r\n aggregatedPatchImage = 0\r\n aggregatedPatchPath = \"./static/__cache__/aggregated_patch.png\"\r\n # patchInfo = []\r\n firstFlag = True\r\n for i in range(0, len(FIX_DATA_LIST)):\r\n fixData = FIX_DATA_LIST[i]\r\n dataName = fixData[0][0]\r\n className = fixData[0][1]\r\n stiName = fixData[0][2]\r\n userId = fixData[0][3]\r\n patchDirPath = fixatedPatchPath + dataName +\"/\"+ className +\"/\"+ stiName +\"/\"\r\n scanpath = fixData[1]\r\n _fixs = []\r\n for j in range(0, len(scanpath)):\r\n _fixs.append({'index': str(j).zfill(3), 'px': patchPos, 'py': 0})\r\n patchPath = patchDirPath + userId +\"_\"+ str(j).zfill(3) +\".png\"\r\n # print(patchPath)\r\n patch = 0\r\n if os.path.exists(patchPath):\r\n patch = cv2.imread(patchPath)\r\n if firstFlag == True:\r\n aggregatedPatchImage = patch.copy()\r\n patchPos = patchPos+PATCH_SIZE\r\n firstFlag = False\r\n continue\r\n else:\r\n patch = np.empty((PATCH_SIZE, PATCH_SIZE, 3), dtype=np.uint8)\r\n if firstFlag == True:\r\n aggregatedPatchImage = patch.copy()\r\n patchPos = patchPos+PATCH_SIZE\r\n firstFlag = False\r\n continue\r\n processAggImg = np.hstack((aggregatedPatchImage, patch))\r\n aggregatedPatchImage = 
processAggImg.copy()\r\n patchPos = patchPos+PATCH_SIZE\r\n FIX_DATA_LIST[i].append(_fixs)\r\n cv2.imwrite(aggregatedPatchPath, aggregatedPatchImage)\r\n\r\n response['status'] = 'success'\r\n response['fixDataList'] = FIX_DATA_LIST\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = str(e)\r\n print(e)\r\n return json.dumps(response)\r\n\r\n@app.route('/api/processing/loadAllFixationDataList', methods=['POST'])\r\ndef processing_load_allFixationDataList():\r\n global PARTICIPANT\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_STI_NAMES = request.form['stiList']\r\n split_sti_names = GET_STI_NAMES.split(\"-\")\r\n sti_files_list = []\r\n for sti in split_sti_names:\r\n datasetName = sti.split(\"/\")[0]\r\n semanticClassName = sti.split(\"/\")[1]\r\n stiName = sti.split(\"/\")[2].split(\".\")[0]\r\n stiExt = sti.split(\"/\")[2].split(\".\")[1]\r\n sti_files_list.append([datasetName, semanticClassName, stiName, stiExt])\r\n\r\n PARTICIPANT = []\r\n for sti in sti_files_list:\r\n _path = './static/fix/'+ sti[0] +\"/\"+ sti[1] +\"/\"+ sti[2] +\"_\"+ sti[3] +\"/\"\r\n if not(os.path.exists(_path)):\r\n continue\r\n participantList = os.listdir(_path)\r\n for _p in participantList:\r\n _pdata = sti[0] +\"/\"+ sti[1] +\"/\"+ sti[2] +\"_\"+ sti[3] +\"/\" + _p.split(\".\")[0]\r\n PARTICIPANT.append(_pdata)\r\n print(\"PARTICIPANT\")\r\n print(PARTICIPANT)\r\n\r\n # generate fixations (scanpath) data list\r\n PARTICIPANT_FIX_FILE_LIST = []\r\n for _participant in PARTICIPANT:\r\n split_pData = _participant.split(\"/\")\r\n sDataset = split_pData[0]\r\n sSemanticClass = split_pData[1]\r\n sFixDirName = split_pData[2]\r\n sParticipantFixFileName = split_pData[3] +\".csv\"\r\n fixFilePath = \"./static/fix/\"+ sDataset +\"/\"+ sSemanticClass +\"/\"+ sFixDirName +\"/\"+ sParticipantFixFileName\r\n if not(os.path.exists(fixFilePath)):\r\n continue\r\n PARTICIPANT_FIX_FILE_LIST.append([split_pData, fixFilePath])\r\n FIX_DATA_LIST = []\r\n for fixFilePath in PARTICIPANT_FIX_FILE_LIST:\r\n _id = fixFilePath[0]\r\n _path = fixFilePath[1]\r\n df = pd.read_csv(_path, header=None)\r\n dfList = df.values.tolist()\r\n FIX_DATA_LIST.append([_id, dfList])\r\n\r\n # generate aggregated fixation patches image\r\n fixatedPatchPath = \"./static/output_image_patch/\"\r\n patchPos = 0\r\n aggregatedPatchImage = 0\r\n aggregatedPatchPath = \"./static/__cache__/aggregated_patch.png\"\r\n # patchInfo = []\r\n firstFlag = True\r\n for i in range(0, len(FIX_DATA_LIST)):\r\n fixData = FIX_DATA_LIST[i]\r\n dataName = fixData[0][0]\r\n className = fixData[0][1]\r\n stiName = fixData[0][2]\r\n userId = fixData[0][3]\r\n patchDirPath = fixatedPatchPath + dataName +\"/\"+ className +\"/\"+ stiName +\"/\"\r\n scanpath = fixData[1]\r\n _fixs = []\r\n for j in range(0, len(scanpath)):\r\n _fixs.append({'index': str(j).zfill(3), 'px': patchPos, 'py': 0})\r\n patchPath = patchDirPath + userId +\"_\"+ str(j).zfill(3) +\".png\"\r\n # print(patchPath)\r\n patch = 0\r\n if os.path.exists(patchPath):\r\n patch = cv2.imread(patchPath)\r\n if firstFlag == True:\r\n aggregatedPatchImage = patch.copy()\r\n patchPos = patchPos+PATCH_SIZE\r\n firstFlag = False\r\n continue\r\n else:\r\n patch = np.empty((PATCH_SIZE, PATCH_SIZE, 3), dtype=np.uint8)\r\n if firstFlag == True:\r\n aggregatedPatchImage = patch.copy()\r\n patchPos = patchPos+PATCH_SIZE\r\n firstFlag = False\r\n continue\r\n processAggImg = np.hstack((aggregatedPatchImage, patch))\r\n aggregatedPatchImage = processAggImg.copy()\r\n patchPos = 
patchPos+PATCH_SIZE\r\n FIX_DATA_LIST[i].append(_fixs)\r\n cv2.imwrite(aggregatedPatchPath, aggregatedPatchImage)\r\n \r\n\r\n # load selected stimulus IQR of saliency features\r\n featNPList = []\r\n for sti in sti_files_list:\r\n for featName in FEATURE_ordered:\r\n featureFilePath = \"./static/feature/\"+ featName +\"/\"+ sti[0] +\"/\"+ sti[1] +\"/\"+ sti[2] +\".csv\"\r\n featureDF = pd.read_csv(featureFilePath, header=None)\r\n featNPList.append(featureDF.to_numpy())\r\n scaler = MinMaxScaler()\r\n stiIQRList = []\r\n for i in range(0, len(featNPList)):\r\n featNP = featNPList[i]\r\n norNp = scaler.fit_transform(featNP)\r\n q1 = np.quantile(norNp, 0.25)\r\n m = np.quantile(norNp, 0.5)\r\n q3 = np.quantile(norNp, 0.75)\r\n stiIQRList.append([FEATURE_ordered[i], str(q1), str(m), str(q3)])\r\n\r\n # load all stimulus IQR of saliency features\r\n ######################################################################\r\n\r\n response['status'] = 'success'\r\n response['participantList'] = PARTICIPANT\r\n response['fixDataList'] = FIX_DATA_LIST\r\n response['stimulusIQR'] = stiIQRList\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = str(e)\r\n print(e)\r\n return json.dumps(response)\r\n\r\n@app.route('/api/processing/loadFixationDataList', methods=['POST'])\r\ndef processing_load_fixationDataList():\r\n global PARTICIPANT\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_STI_NAMES = request.form['stiList']\r\n split_sti_names = GET_STI_NAMES.split(\"-\")\r\n sti_files_list = []\r\n for sti in split_sti_names:\r\n datasetName = sti.split(\"/\")[0]\r\n semanticClassName = sti.split(\"/\")[1]\r\n stiName = sti.split(\"/\")[2].split(\".\")[0]\r\n stiExt = sti.split(\"/\")[2].split(\".\")[1]\r\n sti_files_list.append([datasetName, semanticClassName, stiName, stiExt])\r\n\r\n PARTICIPANT = []\r\n for sti in sti_files_list:\r\n _path = './static/fix/'+ sti[0] +\"/\"+ sti[1] +\"/\"+ sti[2] +\"_\"+ sti[3] +\"/\"\r\n if not(os.path.exists(_path)):\r\n continue\r\n participantList = os.listdir(_path)\r\n for _p in participantList:\r\n _pdata = sti[0] +\"/\"+ sti[1] +\"/\"+ sti[2] +\"_\"+ sti[3] +\"/\" + _p.split(\".\")[0]\r\n PARTICIPANT.append(_pdata)\r\n \r\n response['status'] = 'success'\r\n response['participantList'] = PARTICIPANT\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = str(e)\r\n print(e)\r\n return json.dumps(response)\r\n\r\n\r\n@app.route('/api/processing/loadStimulusNames', methods=['POST'])\r\ndef processing_load_stimulusFileNames():\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_SEMANTIC_CLASS = request.form['stiClass']\r\n SEMANTIC_CLASS = GET_SEMANTIC_CLASS.split(\"-\")\r\n print(SEMANTIC_CLASS)\r\n STIMULUS_LIST = []\r\n for stiClass in SEMANTIC_CLASS:\r\n datasetName = stiClass.split(\"/\")[0]\r\n semanticClass = stiClass.split(\"/\")[1]\r\n stiDirPath = \"./static/stimulus/\"+datasetName+\"/\"+semanticClass+\"/\"\r\n stiList = os.listdir(stiDirPath)\r\n for stiName in stiList:\r\n _stiPath = stiDirPath + stiName\r\n stiImg = cv2.imread(_stiPath)\r\n stiHeight, stiWidth = stiImg.shape[:2]\r\n STIMULUS_LIST.append([datasetName+\"/\"+semanticClass+\"/\"+stiName, stiWidth, stiHeight])\r\n # print(STIMULUS_LIST)\r\n \r\n response['status'] = 'success'\r\n response['stimulusNames'] = STIMULUS_LIST\r\n \r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = str(e)\r\n print(e)\r\n return json.dumps(response)\r\n\r\n@app.route('/api/processing/stiDataset', 
methods=['POST'])\r\ndef processing_stiDataset():\r\n print(\"processing_stiDataset\")\r\n global STI_DATASET\r\n global STI_CLASS\r\n global PARTICIPANT\r\n global FEATURE\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_STI_DATASET = request.form['stiDataset']\r\n STI_DATASET = []\r\n if GET_STI_DATASET == \"all\":\r\n STI_DATASET = os.listdir(\"./static/stimulus/\")\r\n else:\r\n STI_DATASET = GET_STI_DATASET.split(\"/\")\r\n for stiDataset in STI_DATASET:\r\n if stiDataset == \"all\":\r\n STI_DATASET = os.listdir(\"./static/stimulus/\")\r\n break\r\n print(STI_DATASET)\r\n \r\n STI_CLASS = []\r\n for stiDataset in STI_DATASET:\r\n _path = \"./static/stimulus/\"+stiDataset+\"/\"\r\n stiClassList = os.listdir(_path)\r\n STI_CLASS.append([stiDataset, stiClassList])\r\n # print(STI_CLASS)\r\n \r\n FEATURE = []\r\n featureDirPath = \"./static/feature/\"\r\n FEATURE = os.listdir(featureDirPath)\r\n\r\n # TODO: move to the last location of the participant selectAction function\r\n spResFilePath = \"./static/sp.csv\"\r\n spDF = pd.read_csv(spResFilePath)\r\n spConvertedList = []\r\n spNanCountList = []\r\n spConvertedColumns = ['group', 'variable', 'value']\r\n \r\n for selectedDataset in STI_DATASET:\r\n _idx = 0\r\n isStidataset = spDF['dataset'] == selectedDataset\r\n filteredSPDF = spDF[isStidataset]\r\n for index, row in filteredSPDF.iterrows():\r\n variable = row['dataset'][0]+str(_idx).zfill(2)\r\n _idx = _idx + 1\r\n for featName in FEATURE:\r\n spCountStr = row[featName]\r\n spSplit = spCountStr.split(\"_\")[1].split(\"/\")\r\n spOverCount = spSplit[0]\r\n spNanCount = spSplit[1]\r\n stiCount = spSplit[2]\r\n spPer = (float(spOverCount)/float(stiCount))*100\r\n spConvertedList.append([featureNameConverter_short(featName), variable, spPer])\r\n # spConvertedList.append([featureNameConverter(featName), variable, spPer])\r\n spNanCountList.append([featureNameConverter_short(featName), variable, int(spNanCount)])\r\n # spNanCountList.append([featureNameConverter(featName), variable, int(spNanCount)])\r\n spCDF = pd.DataFrame(spConvertedList, columns=spConvertedColumns)\r\n spCDF.to_csv(\"./static/__cache__/sp_cvt.csv\", mode='w', index=False, header=True)\r\n spNCDF = pd.DataFrame(spNanCountList, columns=spConvertedColumns)\r\n spNCDF.to_csv(\"./static/__cache__/sp_nan.csv\", mode='w', index=False, header=True)\r\n \r\n\r\n response['status'] = 'success'\r\n response['classList'] = STI_CLASS\r\n response['featureList'] = FEATURE\r\n \r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = str(e)\r\n print(e)\r\n\r\n return json.dumps(response)\r\n\r\n#################\r\n# overview APIs #\r\n#################\r\n@app.route('/api/overview', methods=['POST'])\r\ndef overview_calc():\r\n print(\"overview_calc\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_SELECTED_DATAINFO = request.form['semanticClass']\r\n GET_DATASET = GET_SELECTED_DATAINFO.split(\"/\")[0]\r\n GET_SEMANTIC_CLASS = GET_SELECTED_DATAINFO.split(\"/\")[1]\r\n \r\n stimulusPath = \"./static/stimulus/\"+ GET_DATASET +\"/\"+ GET_SEMANTIC_CLASS +\"/\"\r\n stimulusList = os.listdir(stimulusPath)\r\n \r\n OVERVIEW_COUNT_LIST = []\r\n OVERVIEW_COUNT_LIST = overview_count(stimulusList, GET_DATASET, GET_SEMANTIC_CLASS)\r\n \r\n response['status'] = 'success'\r\n response['overview'] = OVERVIEW_COUNT_LIST\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = str(e)\r\n print(e)\r\n\r\n return json.dumps(response)\r\n\r\n@app.route('/api/overviewSM', methods=['POST'])\r\ndef 
overviewSM_calc():\r\n print(\"overviewSM_calc\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n GET_SELECTED_DATAINFO = request.form['semanticClass']\r\n GET_DATASET = GET_SELECTED_DATAINFO.split(\"/\")[0]\r\n GET_SEMANTIC_CLASS = GET_SELECTED_DATAINFO.split(\"/\")[1]\r\n \r\n stimulusPath = \"./static/stimulus/\"+ GET_DATASET +\"/\"+ GET_SEMANTIC_CLASS +\"/\"\r\n stimulusList = os.listdir(stimulusPath)\r\n \r\n # saliency model overview count: initial model is \"IttiKoch1998\"\r\n SELECTED_SALIENCY_MODEL = \"IttiKoch1998\"\r\n OVERVIEW_COUNT_LIST_SM = []\r\n OVERVIEW_COUNT_LIST_SM = overview_count_sm(stimulusList, GET_DATASET, GET_SEMANTIC_CLASS, SELECTED_SALIENCY_MODEL)\r\n \r\n response['status'] = 'success'\r\n response['overviewSM'] = OVERVIEW_COUNT_LIST_SM\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = str(e)\r\n print(e)\r\n\r\n return json.dumps(response)\r\n\r\n#######################\r\n# saliency model APIs #\r\n#######################\r\n@app.route('/api/saliency/updateModelSet', methods=['POST'])\r\ndef saliency_updateModelSet():\r\n global RAW_DATA_LIST\r\n print(\"saliency_updateModelSet\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n SELECTED_SALIENCY_MODEL = request.form['saliencyModel']\r\n SELECTED_STIMULUS_INFO = request.form['stimulusInfo']\r\n SELECTED_DATA_TRANSFORMATION_METHOD = request.form['dtMethod']\r\n data_transformation_method = \"min_max\"\r\n if SELECTED_DATA_TRANSFORMATION_METHOD != \"\":\r\n data_transformation_method = SELECTED_DATA_TRANSFORMATION_METHOD\r\n # print(SELECTED_SALIENCY_MODEL)\r\n # print(SELECTED_STIMULUS_INFO)\r\n # print(SELECTED_DATA_TRANSFORMATION_METHOD)\r\n # print(data_transformation_method)\r\n\r\n stiInfoSplit = SELECTED_STIMULUS_INFO.split(\"/\")\r\n datasetName = stiInfoSplit[0]\r\n semanticClassName = stiInfoSplit[1]\r\n stimulusFileName = stiInfoSplit[2]\r\n stimulusName = stiInfoSplit[2].split(\".\")[0]\r\n stimulusExe = stiInfoSplit[2].split(\".\")[1]\r\n stimulusDirName = stimulusName +\"_\"+ stimulusExe\r\n \r\n # load stimulus\r\n stiPath = \"./static/stimulus/\"+ datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusFileName\r\n print(stiPath)\r\n stimulus = cv2.imread(stiPath)\r\n stiHeight, stiWidth = stimulus.shape[:2]\r\n # generate and load saliency map\r\n smPath = \"./static/models/\"+ SELECTED_SALIENCY_MODEL +\"/\"+ datasetName +\"-\"+ semanticClassName +\"-\"+ stimulusFileName\r\n print(smPath)\r\n gen_saliency_map(SELECTED_SALIENCY_MODEL, stimulus, smPath)\r\n sm = cv2.imread(smPath)\r\n gen_discrete_saliency_map(sm, THRESHOLD_SM)\r\n # load ground truth fixation map\r\n gtPath = \"./static/ground_truth/\"+ datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusName +\".jpg\"\r\n print(gtPath)\r\n gt = cv2.imread(gtPath)\r\n generate_discrete_groundTruthFixationMap(gt)\r\n # generate difference map\r\n dmPath = \"./static/__cache__/difference_map.png\"\r\n print(dmPath)\r\n gen_difference_map(gt, sm, dmPath)\r\n\r\n fixationDataList = []\r\n cachePath = \"./static/__cache__/sCache/\" + datasetName +\"-\"+ semanticClassName +\"-\"+ stimulusDirName +\"-\"+ data_transformation_method +\".csv\"\r\n rawPath = \"./static/__cache__/sCache/\" + datasetName +\"-\"+ semanticClassName +\"-\"+ stimulusDirName +\"-raw\"+\".csv\"\r\n if not(os.path.exists(cachePath)) or not(os.path.exists(rawPath)):\r\n print(\"Generate cache file: \"+cachePath)\r\n # load feature data\r\n featureDFList = []\r\n featureDir = \"./static/feature/\"\r\n for _f in FEATURE_ordered:\r\n 
featurePath = featureDir + _f +\"/\"+ datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusName +\".csv\"\r\n featureDF = pd.read_csv(featurePath, header=None)\r\n featureDFList.append(featureDF)\r\n # load fixation data on stimulus\r\n fixDir = \"./static/fix/\"+ datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusDirName +\"/\"\r\n observerList = os.listdir(fixDir)\r\n fixationDfList = []\r\n print(\"observerList\")\r\n print(observerList)\r\n for obFileName in observerList:\r\n _path = fixDir + obFileName\r\n ob = datasetName +\"/\"+ semanticClassName +\"/\"+ stimulusDirName +\"/\"+ obFileName\r\n fixDF = pd.read_csv(_path, header=None)\r\n fixationDfList.append([ob, fixDF])\r\n\r\n for i in range(0, len(fixationDfList)):\r\n ob = fixationDfList[i][0]\r\n for _fp in fixationDfList[i][1].values.tolist():\r\n _x = int(_fp[0])\r\n _y = int(_fp[1])\r\n _t = float(_fp[2])\r\n _label_gt = label_groundTruthFixationMap(gt, _x, _y)\r\n _label_sm = label_saliencyMap(sm, _x, _y, THRESHOLD_SM)\r\n obFixList = [ob, _x, _y, _t, _label_gt, _label_sm]\r\n for j in range(0, len(FEATURE_ordered)):\r\n fMean = getFeatureMeanVal(featureDFList[j], _x, _y, stiWidth, stiHeight, PATCH_SIZE)\r\n obFixList.append(fMean)\r\n fixationDataList.append(obFixList)\r\n \r\n if os.path.exists(rawPath):\r\n rawDF = pd.read_csv(rawPath)\r\n RAW_DATA_LIST = rawDF.values.tolist()\r\n else:\r\n RAW_DATA_LIST = fixationDataList\r\n dfCols = [\"id\", \"x\", \"y\", \"duration\", \"label_gt\", \"label_sm\"]\r\n for featureName in FEATURE_ordered:\r\n dfCols.append(featureName)\r\n rawDF = pd.DataFrame(fixationDataList, columns=dfCols)\r\n rawDF.to_csv(rawPath, mode='w', index=False, header=True)\r\n # data transformation\r\n if data_transformation_method != \"raw\":\r\n dfCols = [\"id\", \"x\", \"y\", \"duration\", \"label_gt\", \"label_sm\"]\r\n for featureName in FEATURE_ordered:\r\n dfCols.append(featureName)\r\n fixationsDF = pd.DataFrame(fixationDataList, columns=dfCols)\r\n dfFrontCols = [\"id\", \"x\", \"y\", \"duration\", \"label_gt\", \"label_sm\"]\r\n tfDF = dataTransformation(data_transformation_method, fixationsDF, FEATURE_ordered, dfFrontCols)\r\n transformedFixationList = tfDF.values.tolist()\r\n fixationDataList = transformedFixationList\r\n\r\n dfCols = [\"id\", \"x\", \"y\", \"duration\", \"label_gt\", \"label_sm\"]\r\n for featureName in FEATURE_ordered:\r\n dfCols.append(featureName)\r\n saveDF = pd.DataFrame(fixationDataList, columns=dfCols)\r\n saveDF.to_csv(cachePath, mode='w', index=False, header=True)\r\n else:\r\n print(\"Use cache file: \"+cachePath)\r\n cacheDF = pd.read_csv(cachePath)\r\n fixationDataList = cacheDF.values.tolist()\r\n\r\n # calculate evaluation metrics\r\n evaluation_metrics = [\"IG\", \"AUC\", \"sAUC\", \"NSS\", \"CC\", \"KLDiv\", \"SIM\"]\r\n metricList = []\r\n gt_gray = cv2.imread(gtPath, 0)\r\n sm_gray = cv2.imread(smPath, 0)\r\n for em in evaluation_metrics:\r\n emres = calculate_evaluation_metric(em, gt_gray, sm_gray)\r\n metricList.append(str(emres))\r\n\r\n response['status'] = 'success'\r\n response['smPath'] = smPath.split(\".\")[1]+\".\"+smPath.split(\".\")[2]\r\n response['gtPath'] = gtPath.split(\".\")[1]+\".\"+gtPath.split(\".\")[2]\r\n response['dmPath'] = dmPath.split(\".\")[1]+\".\"+dmPath.split(\".\")[2]\r\n response['patchDataList'] = fixationDataList\r\n response['evaluationMetrics'] = metricList\r\n # response['evaluationMetricsColumns'] = evaluation_metrics\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = str(e)\r\n print(e)\r\n 
return json.dumps(response)\r\n\r\n@app.route('/api/saliency/individualNormalization', methods=['POST'])\r\ndef saliency_individualNormalization():\r\n global RAW_DATA_LIST\r\n print(\"saliency_individualNormalization\")\r\n print(request.form)\r\n response = {}\r\n try:\r\n NORMALIZATION_METHODS = request.form['methods']\r\n SELECTED_STIMULUS_INFO = request.form['stimulusInfo']\r\n METHODS_LIST = NORMALIZATION_METHODS.split(\"/\")\r\n stiInfoSplit = SELECTED_STIMULUS_INFO.split(\"/\")\r\n datasetName = stiInfoSplit[0]\r\n semanticClassName = stiInfoSplit[1]\r\n stimulusFileName = stiInfoSplit[2]\r\n stimulusName = stiInfoSplit[2].split(\".\")[0]\r\n stimulusExe = stiInfoSplit[2].split(\".\")[1]\r\n stimulusDirName = stimulusName +\"_\"+ stimulusExe\r\n\r\n if len(RAW_DATA_LIST) == 0:\r\n rawPath = \"./static/__cache__/sCache/\" + datasetName +\"-\"+ semanticClassName +\"-\"+ stimulusDirName +\"-raw\"+\".csv\"\r\n rawDF = pd.read_csv(rawPath)\r\n RAW_DATA_LIST = rawDF.values.tolist()\r\n \r\n dfCols = [\"id\", \"x\", \"y\", \"duration\", \"label_gt\", \"label_sm\"]\r\n for featureName in FEATURE_ordered:\r\n dfCols.append(featureName)\r\n fixationsDF = pd.DataFrame(RAW_DATA_LIST, columns=dfCols)\r\n dfFrontCols = [\"id\", \"x\", \"y\", \"duration\", \"label_gt\", \"label_sm\"]\r\n tfDF = featureIndividualNormalizations(METHODS_LIST, fixationsDF, FEATURE_ordered, dfFrontCols)\r\n individualNormalizeFixations = tfDF.values.tolist()\r\n \r\n response['status'] = 'success'\r\n response['individualNormalizeFixations'] = individualNormalizeFixations\r\n except Exception as e:\r\n response['status'] = 'failed'\r\n response['reason'] = str(e)\r\n print(e)\r\n return json.dumps(response)", "sub_path": "gaze_project/gazeBehavior/ver_6/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 81798, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "18", "api": [{"api_name": "flask_cors.CORS", "line_number": 51, "usage_type": "call"}, {"api_name": "pythons.models.IttiKoch1998.pySaliencyMap.pySaliencyMap", "line_number": 64, "usage_type": "call"}, {"api_name": "pythons.models.IttiKoch1998.pySaliencyMap", "line_number": 64, "usage_type": "name"}, {"api_name": "cv2.convertScaleAbs", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.array", 
"line_number": 110, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 157, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 173, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 174, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 175, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 176, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 179, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 180, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 181, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.corrcoef", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.single", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.single", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.zeros", 
"line_number": 287, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.trapz", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 296, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 308, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 309, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 310, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 311, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 313, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 314, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 316, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 318, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 319, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 320, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 321, "usage_type": "attribute"}, {"api_name": "numpy.bool", "line_number": 327, "usage_type": "attribute"}, {"api_name": "numpy.finfo", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 328, "usage_type": "attribute"}, {"api_name": "numpy.log2", "line_number": 329, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 341, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 342, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 343, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 344, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 346, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 347, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 348, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 349, "usage_type": "attribute"}, {"api_name": "numpy.minimum", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 359, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 359, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 374, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 376, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 382, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 402, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 403, "usage_type": "call"}, {"api_name": "os.path", "line_number": 403, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 405, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 408, "usage_type": "call"}, {"api_name": 
"pandas.read_csv", "line_number": 414, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 455, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 455, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 456, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 458, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 511, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 516, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PowerTransformer", "line_number": 521, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 528, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 555, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 567, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PowerTransformer", "line_number": 579, "usage_type": "call"}, {"api_name": "sklearn.manifold.MDS", "line_number": 632, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 637, "usage_type": "call"}, {"api_name": "sklearn.decomposition.FastICA", "line_number": 642, "usage_type": "call"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 647, "usage_type": "call"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 652, "usage_type": "call"}, {"api_name": "sklearn.cross_decomposition.PLSRegression", "line_number": 657, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 730, "usage_type": "call"}, {"api_name": "collections.OrderedDict.fromkeys", "line_number": 755, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 755, "usage_type": "name"}, {"api_name": "fastdtw.fastdtw", "line_number": 792, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.euclidean", "line_number": 792, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 802, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 803, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 895, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 916, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 919, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 924, "usage_type": "call"}, {"api_name": "os.path", "line_number": 924, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 937, "usage_type": "call"}, {"api_name": "os.path", "line_number": 937, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 939, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 951, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 974, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 998, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1020, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 1036, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1049, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 1072, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1080, "usage_type": "call"}, {"api_name": "pandas.read_csv", 
"line_number": 1087, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1105, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1126, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1138, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1140, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1165, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 1193, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1203, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 1214, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 1217, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1220, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1234, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1242, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1258, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1274, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1284, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1287, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1302, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1302, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 1313, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 1352, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 1358, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1361, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 1382, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1388, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1404, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1422, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1428, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1436, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1439, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1465, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1495, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 1540, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 1555, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1572, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1595, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1595, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 1602, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1627, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1627, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 1628, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 1635, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 1635, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 1641, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 1645, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1653, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1674, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 1674, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 1676, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1692, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1692, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 1699, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1724, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1724, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 1725, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 1732, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 1732, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 1738, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 1742, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1750, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 1752, "usage_type": "call"}, {"api_name": "numpy.quantile", "line_number": 1757, "usage_type": "call"}, {"api_name": "numpy.quantile", "line_number": 1758, "usage_type": "call"}, {"api_name": "numpy.quantile", "line_number": 1759, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1773, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1794, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1794, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 1796, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1807, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 1823, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 1826, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1838, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 1853, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 1858, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 1865, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 1871, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 1875, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1898, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1900, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1913, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 1929, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1941, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 1954, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 1968, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 2002, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 2008, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 2013, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 2023, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2023, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 2030, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 2034, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 2041, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 2058, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2058, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 2059, "usage_type": "call"}, {"api_name": "pandas.DataFrame", 
"line_number": 2066, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 2073, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 2082, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 2086, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 2092, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 2093, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 2109, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 2131, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 2137, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 2148, "usage_type": "call"}]}