Dataset schema:

| column | dtype | range / values |
|---|---|---|
| content | string | length 1-103k, nullable (⌀) |
| path | string | length 8-216 |
| filename | string | length 2-179 |
| language | string | 15 classes |
| size_bytes | int64 | 2-189k |
| quality_score | float64 | 0.5-0.95 |
| complexity | float64 | 0-1 |
| documentation_ratio | float64 | 0-1 |
| repository | string | 5 classes |
| stars | int64 | 0-1k |
| created_date | string (date) | 2023-07-10 19:21:08 to 2025-07-09 19:11:45 |
| license | string | 4 classes |
| is_test | bool | 2 classes |
| file_hash | string | length 32 |

One record per source file follows. Each record shows its `content` cell first, then the remaining column values on a single `|`-separated line.
```python
from __future__ import annotations

import sys
from collections.abc import Awaitable, Callable, Generator
from concurrent.futures import Future
from contextlib import (
    AbstractAsyncContextManager,
    AbstractContextManager,
    contextmanager,
)
from dataclasses import dataclass, field
from inspect import isawaitable
from threading import Lock, Thread, get_ident
from types import TracebackType
from typing import (
    Any,
    Generic,
    TypeVar,
    cast,
    overload,
)

from ._core import _eventloop
from ._core._eventloop import get_async_backend, get_cancelled_exc_class, threadlocals
from ._core._synchronization import Event
from ._core._tasks import CancelScope, create_task_group
from .abc import AsyncBackend
from .abc._tasks import TaskStatus

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

T_Retval = TypeVar("T_Retval")
T_co = TypeVar("T_co", covariant=True)
PosArgsT = TypeVarTuple("PosArgsT")


def run(
    func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a coroutine function from a worker thread.

    :param func: a coroutine function
    :param args: positional arguments for the callable
    :return: the return value of the coroutine function

    """
    try:
        async_backend = threadlocals.current_async_backend
        token = threadlocals.current_token
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return async_backend.run_async_from_thread(func, args, token=token)


def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a function in the event loop thread from a worker thread.

    :param func: a callable
    :param args: positional arguments for the callable
    :return: the return value of the callable

    """
    try:
        async_backend = threadlocals.current_async_backend
        token = threadlocals.current_token
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return async_backend.run_sync_from_thread(func, args, token=token)


class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager):
    _enter_future: Future[T_co]
    _exit_future: Future[bool | None]
    _exit_event: Event
    _exit_exc_info: tuple[
        type[BaseException] | None, BaseException | None, TracebackType | None
    ] = (None, None, None)

    def __init__(
        self, async_cm: AbstractAsyncContextManager[T_co], portal: BlockingPortal
    ):
        self._async_cm = async_cm
        self._portal = portal

    async def run_async_cm(self) -> bool | None:
        try:
            self._exit_event = Event()
            value = await self._async_cm.__aenter__()
        except BaseException as exc:
            self._enter_future.set_exception(exc)
            raise
        else:
            self._enter_future.set_result(value)

        try:
            # Wait for the sync context manager to exit.
            # This next statement can raise `get_cancelled_exc_class()` if
            # something went wrong in a task group in this async context
            # manager.
            await self._exit_event.wait()
        finally:
            # In case of cancellation, it could be that we end up here before
            # `_BlockingAsyncContextManager.__exit__` is called, and an
            # `_exit_exc_info` has been set.
            result = await self._async_cm.__aexit__(*self._exit_exc_info)
            return result

    def __enter__(self) -> T_co:
        self._enter_future = Future()
        self._exit_future = self._portal.start_task_soon(self.run_async_cm)
        return self._enter_future.result()

    def __exit__(
        self,
        __exc_type: type[BaseException] | None,
        __exc_value: BaseException | None,
        __traceback: TracebackType | None,
    ) -> bool | None:
        self._exit_exc_info = __exc_type, __exc_value, __traceback
        self._portal.call(self._exit_event.set)
        return self._exit_future.result()


class _BlockingPortalTaskStatus(TaskStatus):
    def __init__(self, future: Future):
        self._future = future

    def started(self, value: object = None) -> None:
        self._future.set_result(value)


class BlockingPortal:
    """An object that lets external threads run code in an asynchronous event loop."""

    def __new__(cls) -> BlockingPortal:
        return get_async_backend().create_blocking_portal()

    def __init__(self) -> None:
        self._event_loop_thread_id: int | None = get_ident()
        self._stop_event = Event()
        self._task_group = create_task_group()
        self._cancelled_exc_class = get_cancelled_exc_class()

    async def __aenter__(self) -> BlockingPortal:
        await self._task_group.__aenter__()
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        await self.stop()
        return await self._task_group.__aexit__(exc_type, exc_val, exc_tb)

    def _check_running(self) -> None:
        if self._event_loop_thread_id is None:
            raise RuntimeError("This portal is not running")
        if self._event_loop_thread_id == get_ident():
            raise RuntimeError(
                "This method cannot be called from the event loop thread"
            )

    async def sleep_until_stopped(self) -> None:
        """Sleep until :meth:`stop` is called."""
        await self._stop_event.wait()

    async def stop(self, cancel_remaining: bool = False) -> None:
        """
        Signal the portal to shut down.

        This marks the portal as no longer accepting new calls and exits from
        :meth:`sleep_until_stopped`.

        :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False``
            to let them finish before returning

        """
        self._event_loop_thread_id = None
        self._stop_event.set()
        if cancel_remaining:
            self._task_group.cancel_scope.cancel()

    async def _call_func(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        future: Future[T_Retval],
    ) -> None:
        def callback(f: Future[T_Retval]) -> None:
            if f.cancelled() and self._event_loop_thread_id not in (
                None,
                get_ident(),
            ):
                self.call(scope.cancel)

        try:
            retval_or_awaitable = func(*args, **kwargs)
            if isawaitable(retval_or_awaitable):
                with CancelScope() as scope:
                    if future.cancelled():
                        scope.cancel()
                    else:
                        future.add_done_callback(callback)

                    retval = await retval_or_awaitable
            else:
                retval = retval_or_awaitable
        except self._cancelled_exc_class:
            future.cancel()
            future.set_running_or_notify_cancel()
        except BaseException as exc:
            if not future.cancelled():
                future.set_exception(exc)

            # Let base exceptions fall through
            if not isinstance(exc, Exception):
                raise
        else:
            if not future.cancelled():
                future.set_result(retval)
        finally:
            scope = None  # type: ignore[assignment]

    def _spawn_task_from_thread(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        name: object,
        future: Future[T_Retval],
    ) -> None:
        """
        Spawn a new task using the given callable.

        Implementers must ensure that the future is resolved when the task finishes.

        :param func: a callable
        :param args: positional arguments to be passed to the callable
        :param kwargs: keyword arguments to be passed to the callable
        :param name: name of the task (will be coerced to a string if not ``None``)
        :param future: a future that will resolve to the return value of the callable,
            or the exception raised during its execution

        """
        raise NotImplementedError

    @overload
    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
    ) -> T_Retval: ...

    @overload
    def call(
        self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
    ) -> T_Retval: ...

    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
    ) -> T_Retval:
        """
        Call the given function in the event loop thread.

        If the callable returns a coroutine object, it is awaited on.

        :param func: any callable
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread

        """
        return cast(T_Retval, self.start_task_soon(func, *args).result())

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]:
        """
        Start a task in the portal's task group.

        The task will be run inside a cancel scope which can be cancelled by cancelling
        the returned future.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a future that resolves with the return value of the callable if the
            task completes successfully, or with the exception raised in the task
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread
        :rtype: concurrent.futures.Future[T_Retval]

        .. versionadded:: 3.0

        """
        self._check_running()
        f: Future[T_Retval] = Future()
        self._spawn_task_from_thread(func, args, {}, name, f)
        return f

    def start_task(
        self,
        func: Callable[..., Awaitable[T_Retval]],
        *args: object,
        name: object = None,
    ) -> tuple[Future[T_Retval], Any]:
        """
        Start a task in the portal's task group and wait until it signals for readiness.

        This method works the same way as :meth:`.abc.TaskGroup.start`.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a tuple of (future, task_status_value) where the ``task_status_value``
            is the value passed to ``task_status.started()`` from within the target
            function
        :rtype: tuple[concurrent.futures.Future[T_Retval], Any]

        .. versionadded:: 3.0

        """

        def task_done(future: Future[T_Retval]) -> None:
            if not task_status_future.done():
                if future.cancelled():
                    task_status_future.cancel()
                elif future.exception():
                    task_status_future.set_exception(future.exception())
                else:
                    exc = RuntimeError(
                        "Task exited without calling task_status.started()"
                    )
                    task_status_future.set_exception(exc)

        self._check_running()
        task_status_future: Future = Future()
        task_status = _BlockingPortalTaskStatus(task_status_future)
        f: Future = Future()
        f.add_done_callback(task_done)
        self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f)
        return f, task_status_future.result()

    def wrap_async_context_manager(
        self, cm: AbstractAsyncContextManager[T_co]
    ) -> AbstractContextManager[T_co]:
        """
        Wrap an async context manager as a synchronous context manager via this portal.

        Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping
        in the middle until the synchronous context manager exits.

        :param cm: an asynchronous context manager
        :return: a synchronous context manager

        .. versionadded:: 2.1

        """
        return _BlockingAsyncContextManager(cm, self)


@dataclass
class BlockingPortalProvider:
    """
    A manager for a blocking portal. Used as a context manager. The first thread to
    enter this context manager causes a blocking portal to be started with the specific
    parameters, and the last thread to exit causes the portal to be shut down. Thus,
    there will be exactly one blocking portal running in this context as long as at
    least one thread has entered this context manager.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options

    .. versionadded:: 4.4
    """

    backend: str = "asyncio"
    backend_options: dict[str, Any] | None = None
    _lock: Lock = field(init=False, default_factory=Lock)
    _leases: int = field(init=False, default=0)
    _portal: BlockingPortal = field(init=False)
    _portal_cm: AbstractContextManager[BlockingPortal] | None = field(
        init=False, default=None
    )

    def __enter__(self) -> BlockingPortal:
        with self._lock:
            if self._portal_cm is None:
                self._portal_cm = start_blocking_portal(
                    self.backend, self.backend_options
                )
                self._portal = self._portal_cm.__enter__()

            self._leases += 1
            return self._portal

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        portal_cm: AbstractContextManager[BlockingPortal] | None = None
        with self._lock:
            assert self._portal_cm
            assert self._leases > 0
            self._leases -= 1
            if not self._leases:
                portal_cm = self._portal_cm
                self._portal_cm = None
                del self._portal

        if portal_cm:
            portal_cm.__exit__(None, None, None)


@contextmanager
def start_blocking_portal(
    backend: str = "asyncio", backend_options: dict[str, Any] | None = None
) -> Generator[BlockingPortal, Any, None]:
    """
    Start a new event loop in a new thread and run a blocking portal in its main task.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options
    :return: a context manager that yields a blocking portal

    .. versionchanged:: 3.0
        Usage as a context manager is now required.

    """

    async def run_portal() -> None:
        async with BlockingPortal() as portal_:
            future.set_result(portal_)
            await portal_.sleep_until_stopped()

    def run_blocking_portal() -> None:
        if future.set_running_or_notify_cancel():
            try:
                _eventloop.run(
                    run_portal, backend=backend, backend_options=backend_options
                )
            except BaseException as exc:
                if not future.done():
                    future.set_exception(exc)

    future: Future[BlockingPortal] = Future()
    thread = Thread(target=run_blocking_portal, daemon=True)
    thread.start()
    try:
        cancel_remaining_tasks = False
        portal = future.result()
        try:
            yield portal
        except BaseException:
            cancel_remaining_tasks = True
            raise
        finally:
            try:
                portal.call(portal.stop, cancel_remaining_tasks)
            except RuntimeError:
                pass
    finally:
        thread.join()


def check_cancelled() -> None:
    """
    Check if the cancel scope of the host task running the current worker thread has
    been cancelled.

    If the host task's current cancel scope has indeed been cancelled, the
    backend-specific cancellation exception will be raised.

    :raises RuntimeError: if the current thread was not spawned by
        :func:`.to_thread.run_sync`

    """
    try:
        async_backend: AsyncBackend = threadlocals.current_async_backend
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    async_backend.check_cancelled()
```
path: .venv\Lib\site-packages\anyio\from_thread.py | filename: from_thread.py | language: Python | size_bytes: 17,478 | quality_score: 0.95 | complexity: 0.176471 | documentation_ratio: 0.032333 | repository: react-lib | stars: 44 | created_date: 2023-11-19T16:36:49.076220 | license: Apache-2.0 | is_test: false | file_hash: 06e3d352e59700b98f4de32dff5e16bd
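The `BlockingPortal` machinery above bridges synchronous threads into a running event loop. A minimal usage sketch (not part of the dataset record; the coroutine and its name are illustrative):

```python
import anyio
from anyio.from_thread import start_blocking_portal

async def fetch_greeting(name: str) -> str:
    # Stand-in for any awaitable work done on the event loop thread.
    await anyio.sleep(0.1)
    return f"Hello, {name}!"

# start_blocking_portal() spins up an event loop in a background thread;
# portal.call() blocks the calling thread until the coroutine completes.
with start_blocking_portal(backend="asyncio") as portal:
    print(portal.call(fetch_greeting, "world"))
```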
```python
from __future__ import annotations

import enum
from dataclasses import dataclass
from typing import Any, Generic, Literal, TypeVar, overload
from weakref import WeakKeyDictionary

from ._core._eventloop import get_async_backend

T = TypeVar("T")
D = TypeVar("D")


async def checkpoint() -> None:
    """
    Check for cancellation and allow the scheduler to switch to another task.

    Equivalent to (but more efficient than)::

        await checkpoint_if_cancelled()
        await cancel_shielded_checkpoint()


    .. versionadded:: 3.0

    """
    await get_async_backend().checkpoint()


async def checkpoint_if_cancelled() -> None:
    """
    Enter a checkpoint if the enclosing cancel scope has been cancelled.

    This does not allow the scheduler to switch to a different task.

    .. versionadded:: 3.0

    """
    await get_async_backend().checkpoint_if_cancelled()


async def cancel_shielded_checkpoint() -> None:
    """
    Allow the scheduler to switch to another task but without checking for cancellation.

    Equivalent to (but potentially more efficient than)::

        with CancelScope(shield=True):
            await checkpoint()


    .. versionadded:: 3.0

    """
    await get_async_backend().cancel_shielded_checkpoint()


def current_token() -> object:
    """
    Return a backend specific token object that can be used to get back to the event
    loop.

    """
    return get_async_backend().current_token()


_run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary()
_token_wrappers: dict[Any, _TokenWrapper] = {}


@dataclass(frozen=True)
class _TokenWrapper:
    __slots__ = "_token", "__weakref__"
    _token: object


class _NoValueSet(enum.Enum):
    NO_VALUE_SET = enum.auto()


class RunvarToken(Generic[T]):
    __slots__ = "_var", "_value", "_redeemed"

    def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]):
        self._var = var
        self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value
        self._redeemed = False


class RunVar(Generic[T]):
    """
    Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop.
    """

    __slots__ = "_name", "_default"

    NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET

    _token_wrappers: set[_TokenWrapper] = set()

    def __init__(
        self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ):
        self._name = name
        self._default = default

    @property
    def _current_vars(self) -> dict[str, T]:
        token = current_token()
        try:
            return _run_vars[token]
        except KeyError:
            run_vars = _run_vars[token] = {}
            return run_vars

    @overload
    def get(self, default: D) -> T | D: ...

    @overload
    def get(self) -> T: ...

    def get(
        self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ) -> T | D:
        try:
            return self._current_vars[self._name]
        except KeyError:
            if default is not RunVar.NO_VALUE_SET:
                return default
            elif self._default is not RunVar.NO_VALUE_SET:
                return self._default

        raise LookupError(
            f'Run variable "{self._name}" has no value and no default set'
        )

    def set(self, value: T) -> RunvarToken[T]:
        current_vars = self._current_vars
        token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET))
        current_vars[self._name] = value
        return token

    def reset(self, token: RunvarToken[T]) -> None:
        if token._var is not self:
            raise ValueError("This token does not belong to this RunVar")

        if token._redeemed:
            raise ValueError("This token has already been used")

        if token._value is _NoValueSet.NO_VALUE_SET:
            try:
                del self._current_vars[self._name]
            except KeyError:
                pass
        else:
            self._current_vars[self._name] = token._value

        token._redeemed = True

    def __repr__(self) -> str:
        return f"<RunVar name={self._name!r}>"
```
path: .venv\Lib\site-packages\anyio\lowlevel.py | filename: lowlevel.py | language: Python | size_bytes: 4,169 | quality_score: 0.85 | complexity: 0.192547 | documentation_ratio: 0 | repository: awesome-app | stars: 617 | created_date: 2023-07-15T10:24:16.442314 | license: GPL-3.0 | is_test: false | file_hash: 468ad2c6bbf0771f368381f7c274a001
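`RunVar` above gives each running event loop its own copy of a value, unlike a `ContextVar` (per context) or a module global (per process). A short sketch, where the variable name and default are our own choices:

```python
import anyio
from anyio.lowlevel import RunVar

# One value per running event loop; a second anyio.run() starts fresh.
_request_count: RunVar[int] = RunVar("_request_count", default=0)

async def handle() -> None:
    _request_count.set(_request_count.get() + 1)
    print("requests in this loop:", _request_count.get())

anyio.run(handle)  # prints 1
anyio.run(handle)  # prints 1 again: the counter is loop-scoped
```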
```python
from __future__ import annotations

import os
import pickle
import subprocess
import sys
from collections import deque
from collections.abc import Callable
from importlib.util import module_from_spec, spec_from_file_location
from typing import TypeVar, cast

from ._core._eventloop import current_time, get_async_backend, get_cancelled_exc_class
from ._core._exceptions import BrokenWorkerProcess
from ._core._subprocesses import open_process
from ._core._synchronization import CapacityLimiter
from ._core._tasks import CancelScope, fail_after
from .abc import ByteReceiveStream, ByteSendStream, Process
from .lowlevel import RunVar, checkpoint_if_cancelled
from .streams.buffered import BufferedByteReceiveStream

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

WORKER_MAX_IDLE_TIME = 300  # 5 minutes

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")

_process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers")
_process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar(
    "_process_pool_idle_workers"
)
_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter")


async def run_sync(  # type: ignore[return]
    func: Callable[[Unpack[PosArgsT]], T_Retval],
    *args: Unpack[PosArgsT],
    cancellable: bool = False,
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """
    Call the given function with the given arguments in a worker process.

    If the ``cancellable`` option is enabled and the task waiting for its completion is
    cancelled, the worker process running it will be abruptly terminated using SIGKILL
    (or ``terminateProcess()`` on Windows).

    :param func: a callable
    :param args: positional arguments for the callable
    :param cancellable: ``True`` to allow cancellation of the operation while it's
        running
    :param limiter: capacity limiter to use to limit the total amount of processes
        running (if omitted, the default limiter is used)
    :return: an awaitable that yields the return value of the function.

    """

    async def send_raw_command(pickled_cmd: bytes) -> object:
        try:
            await stdin.send(pickled_cmd)
            response = await buffered.receive_until(b"\n", 50)
            status, length = response.split(b" ")
            if status not in (b"RETURN", b"EXCEPTION"):
                raise RuntimeError(
                    f"Worker process returned unexpected response: {response!r}"
                )

            pickled_response = await buffered.receive_exactly(int(length))
        except BaseException as exc:
            workers.discard(process)
            try:
                process.kill()
                with CancelScope(shield=True):
                    await process.aclose()
            except ProcessLookupError:
                pass

            if isinstance(exc, get_cancelled_exc_class()):
                raise
            else:
                raise BrokenWorkerProcess from exc

        retval = pickle.loads(pickled_response)
        if status == b"EXCEPTION":
            assert isinstance(retval, BaseException)
            raise retval
        else:
            return retval

    # First pickle the request before trying to reserve a worker process
    await checkpoint_if_cancelled()
    request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL)

    # If this is the first run in this event loop thread, set up the necessary variables
    try:
        workers = _process_pool_workers.get()
        idle_workers = _process_pool_idle_workers.get()
    except LookupError:
        workers = set()
        idle_workers = deque()
        _process_pool_workers.set(workers)
        _process_pool_idle_workers.set(idle_workers)
        get_async_backend().setup_process_pool_exit_at_shutdown(workers)

    async with limiter or current_default_process_limiter():
        # Pop processes from the pool (starting from the most recently used) until we
        # find one that hasn't exited yet
        process: Process
        while idle_workers:
            process, idle_since = idle_workers.pop()
            if process.returncode is None:
                stdin = cast(ByteSendStream, process.stdin)
                buffered = BufferedByteReceiveStream(
                    cast(ByteReceiveStream, process.stdout)
                )

                # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME
                # seconds or longer
                now = current_time()
                killed_processes: list[Process] = []
                while idle_workers:
                    if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME:
                        break

                    process_to_kill, idle_since = idle_workers.popleft()
                    process_to_kill.kill()
                    workers.remove(process_to_kill)
                    killed_processes.append(process_to_kill)

                with CancelScope(shield=True):
                    for killed_process in killed_processes:
                        await killed_process.aclose()

                break

            workers.remove(process)
        else:
            command = [sys.executable, "-u", "-m", __name__]
            process = await open_process(
                command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
            )
            try:
                stdin = cast(ByteSendStream, process.stdin)
                buffered = BufferedByteReceiveStream(
                    cast(ByteReceiveStream, process.stdout)
                )
                with fail_after(20):
                    message = await buffered.receive(6)

                if message != b"READY\n":
                    raise BrokenWorkerProcess(
                        f"Worker process returned unexpected response: {message!r}"
                    )

                main_module_path = getattr(sys.modules["__main__"], "__file__", None)
                pickled = pickle.dumps(
                    ("init", sys.path, main_module_path),
                    protocol=pickle.HIGHEST_PROTOCOL,
                )
                await send_raw_command(pickled)
            except (BrokenWorkerProcess, get_cancelled_exc_class()):
                raise
            except BaseException as exc:
                process.kill()
                raise BrokenWorkerProcess(
                    "Error during worker process initialization"
                ) from exc

            workers.add(process)

        with CancelScope(shield=not cancellable):
            try:
                return cast(T_Retval, await send_raw_command(request))
            finally:
                if process in workers:
                    idle_workers.append((process, current_time()))


def current_default_process_limiter() -> CapacityLimiter:
    """
    Return the capacity limiter that is used by default to limit the number of worker
    processes.

    :return: a capacity limiter object

    """
    try:
        return _default_process_limiter.get()
    except LookupError:
        limiter = CapacityLimiter(os.cpu_count() or 2)
        _default_process_limiter.set(limiter)
        return limiter


def process_worker() -> None:
    # Redirect standard streams to os.devnull so that user code won't interfere with the
    # parent-worker communication
    stdin = sys.stdin
    stdout = sys.stdout
    sys.stdin = open(os.devnull)
    sys.stdout = open(os.devnull, "w")

    stdout.buffer.write(b"READY\n")
    while True:
        retval = exception = None
        try:
            command, *args = pickle.load(stdin.buffer)
        except EOFError:
            return
        except BaseException as exc:
            exception = exc
        else:
            if command == "run":
                func, args = args
                try:
                    retval = func(*args)
                except BaseException as exc:
                    exception = exc
            elif command == "init":
                main_module_path: str | None
                sys.path, main_module_path = args
                del sys.modules["__main__"]
                if main_module_path and os.path.isfile(main_module_path):
                    # Load the parent's main module but as __mp_main__ instead of
                    # __main__ (like multiprocessing does) to avoid infinite recursion
                    try:
                        spec = spec_from_file_location("__mp_main__", main_module_path)
                        if spec and spec.loader:
                            main = module_from_spec(spec)
                            spec.loader.exec_module(main)
                            sys.modules["__main__"] = main
                    except BaseException as exc:
                        exception = exc

        try:
            if exception is not None:
                status = b"EXCEPTION"
                pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL)
            else:
                status = b"RETURN"
                pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL)
        except BaseException as exc:
            exception = exc
            status = b"EXCEPTION"
            pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL)

        stdout.buffer.write(b"%s %d\n" % (status, len(pickled)))
        stdout.buffer.write(pickled)

        # Respect SIGTERM
        if isinstance(exception, SystemExit):
            raise exception


if __name__ == "__main__":
    process_worker()
```
path: .venv\Lib\site-packages\anyio\to_process.py | filename: to_process.py | language: Python | size_bytes: 9,595 | quality_score: 0.95 | complexity: 0.151163 | documentation_ratio: 0.054545 | repository: node-utils | stars: 450 | created_date: 2025-03-23T07:34:23.613655 | license: BSD-3-Clause | is_test: false | file_hash: 03a3cd1b4080e9ff10902c65f506f701
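The module above ships work to a pool of worker processes over pickled pipe messages. A minimal caller-side sketch (the function and argument are illustrative; note that both must be picklable to cross the process boundary):

```python
import anyio
import anyio.to_process

def crunch(n: int) -> int:
    # CPU-bound work that would block the event loop if run in-process.
    return sum(i * i for i in range(n))

async def main() -> None:
    # Runs crunch() in a pooled worker process; the worker is kept idle
    # afterwards for reuse, up to WORKER_MAX_IDLE_TIME seconds.
    result = await anyio.to_process.run_sync(crunch, 1_000_000)
    print(result)

anyio.run(main)
```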
```python
from __future__ import annotations

import sys
from collections.abc import Callable
from typing import TypeVar
from warnings import warn

from ._core._eventloop import get_async_backend
from .abc import CapacityLimiter

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")


async def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval],
    *args: Unpack[PosArgsT],
    abandon_on_cancel: bool = False,
    cancellable: bool | None = None,
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """
    Call the given function with the given arguments in a worker thread.

    If the ``cancellable`` option is enabled and the task waiting for its completion is
    cancelled, the thread will still run its course but its return value (or any raised
    exception) will be ignored.

    :param func: a callable
    :param args: positional arguments for the callable
    :param abandon_on_cancel: ``True`` to abandon the thread (leaving it to run
        unchecked on its own) if the host task is cancelled, ``False`` to ignore
        cancellations in the host task until the operation has completed in the worker
        thread
    :param cancellable: deprecated alias of ``abandon_on_cancel``; will override
        ``abandon_on_cancel`` if both parameters are passed
    :param limiter: capacity limiter to use to limit the total amount of threads running
        (if omitted, the default limiter is used)
    :return: an awaitable that yields the return value of the function.

    """
    if cancellable is not None:
        abandon_on_cancel = cancellable
        warn(
            "The `cancellable=` keyword argument to `anyio.to_thread.run_sync` is "
            "deprecated since AnyIO 4.1.0; use `abandon_on_cancel=` instead",
            DeprecationWarning,
            stacklevel=2,
        )

    return await get_async_backend().run_sync_in_worker_thread(
        func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter
    )


def current_default_thread_limiter() -> CapacityLimiter:
    """
    Return the capacity limiter that is used by default to limit the number of
    concurrent threads.

    :return: a capacity limiter object

    """
    return get_async_backend().current_default_thread_limiter()
```
path: .venv\Lib\site-packages\anyio\to_thread.py | filename: to_thread.py | language: Python | size_bytes: 2,396 | quality_score: 0.85 | complexity: 0.15942 | documentation_ratio: 0.018182 | repository: node-utils | stars: 469 | created_date: 2024-10-29T14:25:08.744227 | license: Apache-2.0 | is_test: false | file_hash: f5f64e08cf3df8fe19549d9f45981ed9
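A minimal usage sketch for the thread offloading shown above (the blocking function and delay are illustrative):

```python
import time

import anyio
import anyio.to_thread

def blocking_io(delay: float) -> str:
    time.sleep(delay)  # stands in for any blocking call
    return f"slept {delay}s"

async def main() -> None:
    # Runs in a worker thread so the event loop stays responsive;
    # concurrency is capped by the default thread limiter.
    print(await anyio.to_thread.run_sync(blocking_io, 0.5))

anyio.run(main)
```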
```python
from __future__ import annotations

from ._core._eventloop import current_time as current_time
from ._core._eventloop import get_all_backends as get_all_backends
from ._core._eventloop import get_cancelled_exc_class as get_cancelled_exc_class
from ._core._eventloop import run as run
from ._core._eventloop import sleep as sleep
from ._core._eventloop import sleep_forever as sleep_forever
from ._core._eventloop import sleep_until as sleep_until
from ._core._exceptions import BrokenResourceError as BrokenResourceError
from ._core._exceptions import BrokenWorkerIntepreter as BrokenWorkerIntepreter
from ._core._exceptions import BrokenWorkerProcess as BrokenWorkerProcess
from ._core._exceptions import BusyResourceError as BusyResourceError
from ._core._exceptions import ClosedResourceError as ClosedResourceError
from ._core._exceptions import DelimiterNotFound as DelimiterNotFound
from ._core._exceptions import EndOfStream as EndOfStream
from ._core._exceptions import IncompleteRead as IncompleteRead
from ._core._exceptions import TypedAttributeLookupError as TypedAttributeLookupError
from ._core._exceptions import WouldBlock as WouldBlock
from ._core._fileio import AsyncFile as AsyncFile
from ._core._fileio import Path as Path
from ._core._fileio import open_file as open_file
from ._core._fileio import wrap_file as wrap_file
from ._core._resources import aclose_forcefully as aclose_forcefully
from ._core._signals import open_signal_receiver as open_signal_receiver
from ._core._sockets import connect_tcp as connect_tcp
from ._core._sockets import connect_unix as connect_unix
from ._core._sockets import create_connected_udp_socket as create_connected_udp_socket
from ._core._sockets import (
    create_connected_unix_datagram_socket as create_connected_unix_datagram_socket,
)
from ._core._sockets import create_tcp_listener as create_tcp_listener
from ._core._sockets import create_udp_socket as create_udp_socket
from ._core._sockets import create_unix_datagram_socket as create_unix_datagram_socket
from ._core._sockets import create_unix_listener as create_unix_listener
from ._core._sockets import getaddrinfo as getaddrinfo
from ._core._sockets import getnameinfo as getnameinfo
from ._core._sockets import wait_readable as wait_readable
from ._core._sockets import wait_socket_readable as wait_socket_readable
from ._core._sockets import wait_socket_writable as wait_socket_writable
from ._core._sockets import wait_writable as wait_writable
from ._core._streams import create_memory_object_stream as create_memory_object_stream
from ._core._subprocesses import open_process as open_process
from ._core._subprocesses import run_process as run_process
from ._core._synchronization import CapacityLimiter as CapacityLimiter
from ._core._synchronization import (
    CapacityLimiterStatistics as CapacityLimiterStatistics,
)
from ._core._synchronization import Condition as Condition
from ._core._synchronization import ConditionStatistics as ConditionStatistics
from ._core._synchronization import Event as Event
from ._core._synchronization import EventStatistics as EventStatistics
from ._core._synchronization import Lock as Lock
from ._core._synchronization import LockStatistics as LockStatistics
from ._core._synchronization import ResourceGuard as ResourceGuard
from ._core._synchronization import Semaphore as Semaphore
from ._core._synchronization import SemaphoreStatistics as SemaphoreStatistics
from ._core._tasks import TASK_STATUS_IGNORED as TASK_STATUS_IGNORED
from ._core._tasks import CancelScope as CancelScope
from ._core._tasks import create_task_group as create_task_group
from ._core._tasks import current_effective_deadline as current_effective_deadline
from ._core._tasks import fail_after as fail_after
from ._core._tasks import move_on_after as move_on_after
from ._core._tempfile import NamedTemporaryFile as NamedTemporaryFile
from ._core._tempfile import SpooledTemporaryFile as SpooledTemporaryFile
from ._core._tempfile import TemporaryDirectory as TemporaryDirectory
from ._core._tempfile import TemporaryFile as TemporaryFile
from ._core._tempfile import gettempdir as gettempdir
from ._core._tempfile import gettempdirb as gettempdirb
from ._core._tempfile import mkdtemp as mkdtemp
from ._core._tempfile import mkstemp as mkstemp
from ._core._testing import TaskInfo as TaskInfo
from ._core._testing import get_current_task as get_current_task
from ._core._testing import get_running_tasks as get_running_tasks
from ._core._testing import wait_all_tasks_blocked as wait_all_tasks_blocked
from ._core._typedattr import TypedAttributeProvider as TypedAttributeProvider
from ._core._typedattr import TypedAttributeSet as TypedAttributeSet
from ._core._typedattr import typed_attribute as typed_attribute

# Re-export imports so they look like they live directly in this package
for __value in list(locals().values()):
    if getattr(__value, "__module__", "").startswith("anyio."):
        __value.__module__ = __name__

del __value
```
path: .venv\Lib\site-packages\anyio\__init__.py | filename: __init__.py | language: Python | size_bytes: 4,993 | quality_score: 0.95 | complexity: 0.023529 | documentation_ratio: 0.012195 | repository: python-kit | stars: 242 | created_date: 2023-08-04T23:11:02.886265 | license: GPL-3.0 | is_test: false | file_hash: 895c01b62412506433227ecba7922199
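The `__module__` rewriting loop at the end of `__init__.py` above makes re-exported names present as the package's own API. A small sketch of its observable effect (an assumption about the rendered values, based on that loop):

```python
import anyio

# Without the loop, these would report their defining submodules,
# e.g. "anyio._core._synchronization".
print(anyio.Lock.__module__)   # expected: "anyio"
print(anyio.sleep.__module__)  # expected: "anyio"
```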
```python
from __future__ import annotations

import math
import sys
from abc import ABCMeta, abstractmethod
from collections.abc import AsyncIterator, Awaitable, Callable, Sequence
from contextlib import AbstractContextManager
from os import PathLike
from signal import Signals
from socket import AddressFamily, SocketKind, socket
from typing import (
    IO,
    TYPE_CHECKING,
    Any,
    TypeVar,
    Union,
    overload,
)

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

if sys.version_info >= (3, 10):
    from typing import TypeAlias
else:
    from typing_extensions import TypeAlias

if TYPE_CHECKING:
    from _typeshed import HasFileno

    from .._core._synchronization import CapacityLimiter, Event, Lock, Semaphore
    from .._core._tasks import CancelScope
    from .._core._testing import TaskInfo
    from ..from_thread import BlockingPortal
    from ._sockets import (
        ConnectedUDPSocket,
        ConnectedUNIXDatagramSocket,
        IPSockAddrType,
        SocketListener,
        SocketStream,
        UDPSocket,
        UNIXDatagramSocket,
        UNIXSocketStream,
    )
    from ._subprocesses import Process
    from ._tasks import TaskGroup
    from ._testing import TestRunner

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]


class AsyncBackend(metaclass=ABCMeta):
    @classmethod
    @abstractmethod
    def run(
        cls,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        options: dict[str, Any],
    ) -> T_Retval:
        """
        Run the given coroutine function in an asynchronous event loop.

        The current thread must not be already running an event loop.

        :param func: a coroutine function
        :param args: positional arguments to ``func``
        :param kwargs: keyword arguments to ``func``
        :param options: keyword arguments to call the backend ``run()`` implementation
            with
        :return: the return value of the coroutine function
        """

    @classmethod
    @abstractmethod
    def current_token(cls) -> object:
        """

        :return:
        """

    @classmethod
    @abstractmethod
    def current_time(cls) -> float:
        """
        Return the current value of the event loop's internal clock.

        :return: the clock value (seconds)
        """

    @classmethod
    @abstractmethod
    def cancelled_exception_class(cls) -> type[BaseException]:
        """Return the exception class that is raised in a task if it's cancelled."""

    @classmethod
    @abstractmethod
    async def checkpoint(cls) -> None:
        """
        Check if the task has been cancelled, and allow rescheduling of other tasks.

        This is effectively the same as running :meth:`checkpoint_if_cancelled` and then
        :meth:`cancel_shielded_checkpoint`.
        """

    @classmethod
    async def checkpoint_if_cancelled(cls) -> None:
        """
        Check if the current task group has been cancelled.

        This will check if the task has been cancelled, but will not allow other tasks
        to be scheduled if not.

        """
        if cls.current_effective_deadline() == -math.inf:
            await cls.checkpoint()

    @classmethod
    async def cancel_shielded_checkpoint(cls) -> None:
        """
        Allow the rescheduling of other tasks.

        This will give other tasks the opportunity to run, but without checking if the
        current task group has been cancelled, unlike with :meth:`checkpoint`.

        """
        with cls.create_cancel_scope(shield=True):
            await cls.sleep(0)

    @classmethod
    @abstractmethod
    async def sleep(cls, delay: float) -> None:
        """
        Pause the current task for the specified duration.

        :param delay: the duration, in seconds
        """

    @classmethod
    @abstractmethod
    def create_cancel_scope(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        pass

    @classmethod
    @abstractmethod
    def current_effective_deadline(cls) -> float:
        """
        Return the nearest deadline among all the cancel scopes effective for the
        current task.

        :return:
            - a clock value from the event loop's internal clock
            - ``inf`` if there is no deadline in effect
            - ``-inf`` if the current scope has been cancelled
        :rtype: float
        """

    @classmethod
    @abstractmethod
    def create_task_group(cls) -> TaskGroup:
        pass

    @classmethod
    @abstractmethod
    def create_event(cls) -> Event:
        pass

    @classmethod
    @abstractmethod
    def create_lock(cls, *, fast_acquire: bool) -> Lock:
        pass

    @classmethod
    @abstractmethod
    def create_semaphore(
        cls,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> Semaphore:
        pass

    @classmethod
    @abstractmethod
    def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
        pass

    @classmethod
    @abstractmethod
    async def run_sync_in_worker_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        args: tuple[Unpack[PosArgsT]],
        abandon_on_cancel: bool = False,
        limiter: CapacityLimiter | None = None,
    ) -> T_Retval:
        pass

    @classmethod
    @abstractmethod
    def check_cancelled(cls) -> None:
        pass

    @classmethod
    @abstractmethod
    def run_async_from_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        args: tuple[Unpack[PosArgsT]],
        token: object,
    ) -> T_Retval:
        pass

    @classmethod
    @abstractmethod
    def run_sync_from_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        args: tuple[Unpack[PosArgsT]],
        token: object,
    ) -> T_Retval:
        pass

    @classmethod
    @abstractmethod
    def create_blocking_portal(cls) -> BlockingPortal:
        pass

    @classmethod
    @abstractmethod
    async def open_process(
        cls,
        command: StrOrBytesPath | Sequence[StrOrBytesPath],
        *,
        stdin: int | IO[Any] | None,
        stdout: int | IO[Any] | None,
        stderr: int | IO[Any] | None,
        **kwargs: Any,
    ) -> Process:
        pass

    @classmethod
    @abstractmethod
    def setup_process_pool_exit_at_shutdown(cls, workers: set[Process]) -> None:
        pass

    @classmethod
    @abstractmethod
    async def connect_tcp(
        cls, host: str, port: int, local_address: IPSockAddrType | None = None
    ) -> SocketStream:
        pass

    @classmethod
    @abstractmethod
    async def connect_unix(cls, path: str | bytes) -> UNIXSocketStream:
        pass

    @classmethod
    @abstractmethod
    def create_tcp_listener(cls, sock: socket) -> SocketListener:
        pass

    @classmethod
    @abstractmethod
    def create_unix_listener(cls, sock: socket) -> SocketListener:
        pass

    @classmethod
    @abstractmethod
    async def create_udp_socket(
        cls,
        family: AddressFamily,
        local_address: IPSockAddrType | None,
        remote_address: IPSockAddrType | None,
        reuse_port: bool,
    ) -> UDPSocket | ConnectedUDPSocket:
        pass

    @classmethod
    @overload
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: None
    ) -> UNIXDatagramSocket: ...

    @classmethod
    @overload
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: str | bytes
    ) -> ConnectedUNIXDatagramSocket: ...

    @classmethod
    @abstractmethod
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: str | bytes | None
    ) -> UNIXDatagramSocket | ConnectedUNIXDatagramSocket:
        pass

    @classmethod
    @abstractmethod
    async def getaddrinfo(
        cls,
        host: bytes | str | None,
        port: str | int | None,
        *,
        family: int | AddressFamily = 0,
        type: int | SocketKind = 0,
        proto: int = 0,
        flags: int = 0,
    ) -> Sequence[
        tuple[
            AddressFamily,
            SocketKind,
            int,
            str,
            tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes],
        ]
    ]:
        pass

    @classmethod
    @abstractmethod
    async def getnameinfo(
        cls, sockaddr: IPSockAddrType, flags: int = 0
    ) -> tuple[str, str]:
        pass

    @classmethod
    @abstractmethod
    async def wait_readable(cls, obj: HasFileno | int) -> None:
        pass

    @classmethod
    @abstractmethod
    async def wait_writable(cls, obj: HasFileno | int) -> None:
        pass

    @classmethod
    @abstractmethod
    def current_default_thread_limiter(cls) -> CapacityLimiter:
        pass

    @classmethod
    @abstractmethod
    def open_signal_receiver(
        cls, *signals: Signals
    ) -> AbstractContextManager[AsyncIterator[Signals]]:
        pass

    @classmethod
    @abstractmethod
    def get_current_task(cls) -> TaskInfo:
        pass

    @classmethod
    @abstractmethod
    def get_running_tasks(cls) -> Sequence[TaskInfo]:
        pass

    @classmethod
    @abstractmethod
    async def wait_all_tasks_blocked(cls) -> None:
        pass

    @classmethod
    @abstractmethod
    def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
        pass
```
path: .venv\Lib\site-packages\anyio\abc\_eventloop.py | filename: _eventloop.py | language: Python | size_bytes: 9,682 | quality_score: 0.85 | complexity: 0.156915 | documentation_ratio: 0.012579 | repository: vue-tools | stars: 192 | created_date: 2023-10-10T23:46:07.336820 | license: MIT | is_test: false | file_hash: ffdec49aea93c083191b32bd697d6d17
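The `current_effective_deadline()` contract spelled out in the ABC above (`-inf` when cancelled, `inf` when no deadline, otherwise the nearest deadline on the loop's clock) can be observed through anyio's public wrappers. A small sketch:

```python
import math

import anyio

async def main() -> None:
    # No enclosing cancel scope with a deadline: inf.
    assert anyio.current_effective_deadline() == math.inf
    with anyio.move_on_after(5):
        # Inside the scope, the deadline is ~5 seconds ahead on the
        # event loop's internal clock.
        remaining = anyio.current_effective_deadline() - anyio.current_time()
        print(f"{remaining:.1f}s until deadline")  # roughly 5.0

anyio.run(main)
```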
```python
from __future__ import annotations

from abc import ABCMeta, abstractmethod
from types import TracebackType
from typing import TypeVar

T = TypeVar("T")


class AsyncResource(metaclass=ABCMeta):
    """
    Abstract base class for all closeable asynchronous resources.

    Works as an asynchronous context manager which returns the instance itself on enter,
    and calls :meth:`aclose` on exit.
    """

    __slots__ = ()

    async def __aenter__(self: T) -> T:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        await self.aclose()

    @abstractmethod
    async def aclose(self) -> None:
        """Close the resource."""
```
path: .venv\Lib\site-packages\anyio\abc\_resources.py | filename: _resources.py | language: Python | size_bytes: 783 | quality_score: 0.85 | complexity: 0.181818 | documentation_ratio: 0 | repository: node-utils | stars: 545 | created_date: 2024-07-17T05:21:02.641721 | license: MIT | is_test: false | file_hash: 50e97e20c6a15b00c62ed92dd538bab2
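Subclasses of the `AsyncResource` ABC above only need to implement `aclose()`; `async with` support comes from the base class. A sketch with a hypothetical resource:

```python
import anyio
from anyio.abc import AsyncResource

class DummyConnection(AsyncResource):
    # Hypothetical resource; aclose() is the only required method.
    async def aclose(self) -> None:
        print("closed")

async def main() -> None:
    async with DummyConnection() as conn:
        print("using", conn)
    # __aexit__ called aclose() for us here.

anyio.run(main)
```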
```python
from __future__ import annotations

import socket
from abc import abstractmethod
from collections.abc import Callable, Collection, Mapping
from contextlib import AsyncExitStack
from io import IOBase
from ipaddress import IPv4Address, IPv6Address
from socket import AddressFamily
from types import TracebackType
from typing import Any, TypeVar, Union

from .._core._typedattr import (
    TypedAttributeProvider,
    TypedAttributeSet,
    typed_attribute,
)
from ._streams import ByteStream, Listener, UnreliableObjectStream
from ._tasks import TaskGroup

IPAddressType = Union[str, IPv4Address, IPv6Address]
IPSockAddrType = tuple[str, int]
SockAddrType = Union[IPSockAddrType, str]
UDPPacketType = tuple[bytes, IPSockAddrType]
UNIXDatagramPacketType = tuple[bytes, str]
T_Retval = TypeVar("T_Retval")


class _NullAsyncContextManager:
    async def __aenter__(self) -> None:
        pass

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        return None


class SocketAttribute(TypedAttributeSet):
    #: the address family of the underlying socket
    family: AddressFamily = typed_attribute()
    #: the local socket address of the underlying socket
    local_address: SockAddrType = typed_attribute()
    #: for IP addresses, the local port the underlying socket is bound to
    local_port: int = typed_attribute()
    #: the underlying stdlib socket object
    raw_socket: socket.socket = typed_attribute()
    #: the remote address the underlying socket is connected to
    remote_address: SockAddrType = typed_attribute()
    #: for IP addresses, the remote port the underlying socket is connected to
    remote_port: int = typed_attribute()


class _SocketProvider(TypedAttributeProvider):
    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        from .._core._sockets import convert_ipv6_sockaddr as convert

        attributes: dict[Any, Callable[[], Any]] = {
            SocketAttribute.family: lambda: self._raw_socket.family,
            SocketAttribute.local_address: lambda: convert(
                self._raw_socket.getsockname()
            ),
            SocketAttribute.raw_socket: lambda: self._raw_socket,
        }
        try:
            peername: tuple[str, int] | None = convert(self._raw_socket.getpeername())
        except OSError:
            peername = None

        # Provide the remote address for connected sockets
        if peername is not None:
            attributes[SocketAttribute.remote_address] = lambda: peername

        # Provide local and remote ports for IP based sockets
        if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6):
            attributes[SocketAttribute.local_port] = (
                lambda: self._raw_socket.getsockname()[1]
            )
            if peername is not None:
                remote_port = peername[1]
                attributes[SocketAttribute.remote_port] = lambda: remote_port

        return attributes

    @property
    @abstractmethod
    def _raw_socket(self) -> socket.socket:
        pass


class SocketStream(ByteStream, _SocketProvider):
    """
    Transports bytes over a socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """


class UNIXSocketStream(SocketStream):
    @abstractmethod
    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
        """
        Send file descriptors along with a message to the peer.

        :param message: a non-empty bytestring
        :param fds: a collection of files (either numeric file descriptors or open file
            or socket objects)
        """

    @abstractmethod
    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
        """
        Receive file descriptors along with a message from the peer.

        :param msglen: length of the message to expect from the peer
        :param maxfds: maximum number of file descriptors to expect from the peer
        :return: a tuple of (message, file descriptors)
        """


class SocketListener(Listener[SocketStream], _SocketProvider):
    """
    Listens to incoming socket connections.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    @abstractmethod
    async def accept(self) -> SocketStream:
        """Accept an incoming connection."""

    async def serve(
        self,
        handler: Callable[[SocketStream], Any],
        task_group: TaskGroup | None = None,
    ) -> None:
        from .. import create_task_group

        async with AsyncExitStack() as stack:
            if task_group is None:
                task_group = await stack.enter_async_context(create_task_group())

            while True:
                stream = await self.accept()
                task_group.start_soon(handler, stream)


class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
    """
    Represents an unconnected UDP socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    async def sendto(self, data: bytes, host: str, port: int) -> None:
        """
        Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port))).

        """
        return await self.send((data, (host, port)))


class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
    """
    Represents a connected UDP socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """


class UNIXDatagramSocket(
    UnreliableObjectStream[UNIXDatagramPacketType], _SocketProvider
):
    """
    Represents an unconnected Unix datagram socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    async def sendto(self, data: bytes, path: str) -> None:
        """Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, path))."""
        return await self.send((data, path))


class ConnectedUNIXDatagramSocket(UnreliableObjectStream[bytes], _SocketProvider):
    """
    Represents a connected Unix datagram socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """
```
path: .venv\Lib\site-packages\anyio\abc\_sockets.py | filename: _sockets.py | language: Python | size_bytes: 6,262 | quality_score: 0.95 | complexity: 0.195876 | documentation_ratio: 0.053691 | repository: node-utils | stars: 236 | created_date: 2024-08-08T12:06:09.933983 | license: MIT | is_test: false | file_hash: 8658545a0a8a38c8fe6593d49ce60265
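A sketch tying the pieces above together: `SocketListener.serve()` accepts connections and hands each `SocketStream` to a handler, and the typed `SocketAttribute` keys expose socket details portably. The port number is arbitrary, and the server runs until cancelled:

```python
import anyio
from anyio.abc import SocketAttribute, SocketStream

async def handle(stream: SocketStream) -> None:
    # extra() looks up a typed attribute published by _SocketProvider.
    print("peer:", stream.extra(SocketAttribute.remote_address))
    async with stream:
        data = await stream.receive()
        await stream.send(data)  # echo one message back

async def main() -> None:
    listener = await anyio.create_tcp_listener(local_port=9999)
    await listener.serve(handle)  # loops forever, one task per client

anyio.run(main)
```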
```python
from __future__ import annotations

from abc import abstractmethod
from collections.abc import Callable
from typing import Any, Generic, TypeVar, Union

from .._core._exceptions import EndOfStream
from .._core._typedattr import TypedAttributeProvider
from ._resources import AsyncResource
from ._tasks import TaskGroup

T_Item = TypeVar("T_Item")
T_co = TypeVar("T_co", covariant=True)
T_contra = TypeVar("T_contra", contravariant=True)


class UnreliableObjectReceiveStream(
    Generic[T_co], AsyncResource, TypedAttributeProvider
):
    """
    An interface for receiving objects.

    This interface makes no guarantees that the received messages arrive in the order in
    which they were sent, or that no messages are missed.

    Asynchronously iterating over objects of this type will yield objects matching the
    given type parameter.
    """

    def __aiter__(self) -> UnreliableObjectReceiveStream[T_co]:
        return self

    async def __anext__(self) -> T_co:
        try:
            return await self.receive()
        except EndOfStream:
            raise StopAsyncIteration

    @abstractmethod
    async def receive(self) -> T_co:
        """
        Receive the next item.

        :raises ~anyio.ClosedResourceError: if the receive stream has been explicitly
            closed
        :raises ~anyio.EndOfStream: if this stream has been closed from the other end
        :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
            due to external causes
        """


class UnreliableObjectSendStream(
    Generic[T_contra], AsyncResource, TypedAttributeProvider
):
    """
    An interface for sending objects.

    This interface makes no guarantees that the messages sent will reach the
    recipient(s) in the same order in which they were sent, or at all.
    """

    @abstractmethod
    async def send(self, item: T_contra) -> None:
        """
        Send an item to the peer(s).

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if the send stream has been explicitly
            closed
        :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
            due to external causes
        """


class UnreliableObjectStream(
    UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item]
):
    """
    A bidirectional message stream which does not guarantee the order or reliability of
    message delivery.
    """


class ObjectReceiveStream(UnreliableObjectReceiveStream[T_co]):
    """
    A receive message stream which guarantees that messages are received in the same
    order in which they were sent, and that no messages are missed.
    """


class ObjectSendStream(UnreliableObjectSendStream[T_contra]):
    """
    A send message stream which guarantees that messages are delivered in the same order
    in which they were sent, without missing any messages in the middle.
    """


class ObjectStream(
    ObjectReceiveStream[T_Item],
    ObjectSendStream[T_Item],
    UnreliableObjectStream[T_Item],
):
    """
    A bidirectional message stream which guarantees the order and reliability of message
    delivery.
    """

    @abstractmethod
    async def send_eof(self) -> None:
        """
        Send an end-of-file indication to the peer.

        You should not try to send any further data to this stream after calling this
        method. This method is idempotent (does nothing on successive calls).
        """


class ByteReceiveStream(AsyncResource, TypedAttributeProvider):
    """
    An interface for receiving bytes from a single peer.

    Iterating this byte stream will yield a byte string of arbitrary length, but no more
    than 65536 bytes.
    """

    def __aiter__(self) -> ByteReceiveStream:
        return self

    async def __anext__(self) -> bytes:
        try:
            return await self.receive()
        except EndOfStream:
            raise StopAsyncIteration

    @abstractmethod
    async def receive(self, max_bytes: int = 65536) -> bytes:
        """
        Receive at most ``max_bytes`` bytes from the peer.

        .. note:: Implementers of this interface should not return an empty
            :class:`bytes` object, and users should ignore them.

        :param max_bytes: maximum number of bytes to receive
        :return: the received bytes
        :raises ~anyio.EndOfStream: if this stream has been closed from the other end
        """


class ByteSendStream(AsyncResource, TypedAttributeProvider):
    """An interface for sending bytes to a single peer."""

    @abstractmethod
    async def send(self, item: bytes) -> None:
        """
        Send the given bytes to the peer.

        :param item: the bytes to send
        """


class ByteStream(ByteReceiveStream, ByteSendStream):
    """A bidirectional byte stream."""

    @abstractmethod
    async def send_eof(self) -> None:
        """
        Send an end-of-file indication to the peer.

        You should not try to send any further data to this stream after calling this
        method. This method is idempotent (does nothing on successive calls).
        """


#: Type alias for all unreliable bytes-oriented receive streams.
AnyUnreliableByteReceiveStream = Union[
    UnreliableObjectReceiveStream[bytes], ByteReceiveStream
]
#: Type alias for all unreliable bytes-oriented send streams.
AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream]
#: Type alias for all unreliable bytes-oriented streams.
AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream]
#: Type alias for all bytes-oriented receive streams.
AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream]
#: Type alias for all bytes-oriented send streams.
AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream]
#: Type alias for all bytes-oriented streams.
AnyByteStream = Union[ObjectStream[bytes], ByteStream]


class Listener(Generic[T_co], AsyncResource, TypedAttributeProvider):
    """An interface for objects that let you accept incoming connections."""

    @abstractmethod
    async def serve(
        self, handler: Callable[[T_co], Any], task_group: TaskGroup | None = None
    ) -> None:
        """
        Accept incoming connections as they come in and start tasks to handle them.

        :param handler: a callable that will be used to handle each accepted connection
        :param task_group: the task group that will be used to start tasks for handling
            each accepted connection (if omitted, an ad-hoc task group will be created)
        """
```
|
.venv\Lib\site-packages\anyio\abc\_streams.py
|
_streams.py
|
Python
| 6,598 | 0.95 | 0.221675 | 0.03871 |
node-utils
| 689 |
2023-08-08T20:10:58.440817
|
GPL-3.0
| false |
618c0ef06fd9a87c7ea1e8970657a99a
|
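The abstract stream interfaces above are meant to be subclassed: only receive() (plus aclose() from AsyncResource) must be implemented, and async iteration then comes for free via __anext__. A minimal sketch of a custom ObjectReceiveStream; the ListReceiveStream class is an illustrative assumption, not part of anyio:

```python
from collections import deque

import anyio
from anyio import EndOfStream
from anyio.abc import ObjectReceiveStream


class ListReceiveStream(ObjectReceiveStream[str]):
    """Hypothetical stream that replays a fixed list, then signals EndOfStream."""

    def __init__(self, items: list[str]) -> None:
        self._items = deque(items)

    async def receive(self) -> str:
        if not self._items:
            raise EndOfStream  # translated to StopAsyncIteration by __anext__

        return self._items.popleft()

    async def aclose(self) -> None:
        self._items.clear()


async def main() -> None:
    async with ListReceiveStream(["a", "b", "c"]) as stream:
        async for item in stream:
            print(item)

anyio.run(main)
```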
from __future__ import annotations\n\nfrom abc import abstractmethod\nfrom signal import Signals\n\nfrom ._resources import AsyncResource\nfrom ._streams import ByteReceiveStream, ByteSendStream\n\n\nclass Process(AsyncResource):\n """An asynchronous version of :class:`subprocess.Popen`."""\n\n @abstractmethod\n async def wait(self) -> int:\n """\n Wait until the process exits.\n\n :return: the exit code of the process\n """\n\n @abstractmethod\n def terminate(self) -> None:\n """\n Terminates the process, gracefully if possible.\n\n On Windows, this calls ``TerminateProcess()``.\n On POSIX systems, this sends ``SIGTERM`` to the process.\n\n .. seealso:: :meth:`subprocess.Popen.terminate`\n """\n\n @abstractmethod\n def kill(self) -> None:\n """\n Kills the process.\n\n On Windows, this calls ``TerminateProcess()``.\n On POSIX systems, this sends ``SIGKILL`` to the process.\n\n .. seealso:: :meth:`subprocess.Popen.kill`\n """\n\n @abstractmethod\n def send_signal(self, signal: Signals) -> None:\n """\n Send a signal to the subprocess.\n\n .. seealso:: :meth:`subprocess.Popen.send_signal`\n\n :param signal: the signal number (e.g. :data:`signal.SIGHUP`)\n """\n\n @property\n @abstractmethod\n def pid(self) -> int:\n """The process ID of the process."""\n\n @property\n @abstractmethod\n def returncode(self) -> int | None:\n """\n The return code of the process. If the process has not yet terminated, this will\n be ``None``.\n """\n\n @property\n @abstractmethod\n def stdin(self) -> ByteSendStream | None:\n """The stream for the standard input of the process."""\n\n @property\n @abstractmethod\n def stdout(self) -> ByteReceiveStream | None:\n """The stream for the standard output of the process."""\n\n @property\n @abstractmethod\n def stderr(self) -> ByteReceiveStream | None:\n """The stream for the standard error output of the process."""\n
|
.venv\Lib\site-packages\anyio\abc\_subprocesses.py
|
_subprocesses.py
|
Python
| 2,067 | 0.85 | 0.189873 | 0 |
node-utils
| 274 |
2023-08-26T01:06:31.811877
|
BSD-3-Clause
| false |
26fabd1bbe7ed971bb359e97df2d1254
|
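Concrete Process objects normally come from anyio.open_process() rather than direct instantiation. A hedged sketch (assumes a python executable on PATH):

```python
import anyio

async def main() -> None:
    # open_process() returns an anyio.abc.Process; stdout/stderr are
    # ByteReceiveStreams and stdin is a ByteSendStream
    async with await anyio.open_process(
        ["python", "-c", "print('hello from a subprocess')"]
    ) as process:
        assert process.stdout is not None
        output = b"".join([chunk async for chunk in process.stdout])
        print(await process.wait(), output)

anyio.run(main)
```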
from __future__ import annotations\n\nimport sys\nfrom abc import ABCMeta, abstractmethod\nfrom collections.abc import Awaitable, Callable\nfrom types import TracebackType\nfrom typing import TYPE_CHECKING, Any, Protocol, TypeVar, overload\n\nif sys.version_info >= (3, 11):\n from typing import TypeVarTuple, Unpack\nelse:\n from typing_extensions import TypeVarTuple, Unpack\n\nif TYPE_CHECKING:\n from .._core._tasks import CancelScope\n\nT_Retval = TypeVar("T_Retval")\nT_contra = TypeVar("T_contra", contravariant=True)\nPosArgsT = TypeVarTuple("PosArgsT")\n\n\nclass TaskStatus(Protocol[T_contra]):\n @overload\n def started(self: TaskStatus[None]) -> None: ...\n\n @overload\n def started(self, value: T_contra) -> None: ...\n\n def started(self, value: T_contra | None = None) -> None:\n """\n Signal that the task has started.\n\n :param value: object passed back to the starter of the task\n """\n\n\nclass TaskGroup(metaclass=ABCMeta):\n """\n Groups several asynchronous tasks together.\n\n :ivar cancel_scope: the cancel scope inherited by all child tasks\n :vartype cancel_scope: CancelScope\n\n .. note:: On asyncio, support for eager task factories is considered to be\n **experimental**. In particular, they don't follow the usual semantics of new\n tasks being scheduled on the next iteration of the event loop, and may thus\n cause unexpected behavior in code that wasn't written with such semantics in\n mind.\n """\n\n cancel_scope: CancelScope\n\n @abstractmethod\n def start_soon(\n self,\n func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],\n *args: Unpack[PosArgsT],\n name: object = None,\n ) -> None:\n """\n Start a new task in this task group.\n\n :param func: a coroutine function\n :param args: positional arguments to call the function with\n :param name: name of the task, for the purposes of introspection and debugging\n\n .. versionadded:: 3.0\n """\n\n @abstractmethod\n async def start(\n self,\n func: Callable[..., Awaitable[Any]],\n *args: object,\n name: object = None,\n ) -> Any:\n """\n Start a new task and wait until it signals for readiness.\n\n :param func: a coroutine function\n :param args: positional arguments to call the function with\n :param name: name of the task, for the purposes of introspection and debugging\n :return: the value passed to ``task_status.started()``\n :raises RuntimeError: if the task finishes without calling\n ``task_status.started()``\n\n .. versionadded:: 3.0\n """\n\n @abstractmethod\n async def __aenter__(self) -> TaskGroup:\n """Enter the task group context and allow starting new tasks."""\n\n @abstractmethod\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> bool | None:\n """Exit the task group context waiting for all tasks to finish."""\n
|
.venv\Lib\site-packages\anyio\abc\_tasks.py
|
_tasks.py
|
Python
| 3,080 | 0.85 | 0.207921 | 0.037975 |
vue-tools
| 431 |
2023-08-13T03:17:13.168335
|
GPL-3.0
| false |
aec0b627fee8beaede570c62aefab408
|
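start() differs from start_soon() in that it suspends until the child task calls task_status.started(), optionally passing a value back to the starter. A minimal sketch; serve() and the port value are illustrative:

```python
import anyio
from anyio.abc import TaskStatus

async def serve(*, task_status: TaskStatus[int]) -> None:
    port = 8080                # pretend a socket was bound here (illustrative)
    task_status.started(port)  # hand the value back to start()
    await anyio.sleep_forever()

async def main() -> None:
    async with anyio.create_task_group() as tg:
        port = await tg.start(serve)  # suspends until started() is called
        print("listening on", port)
        tg.cancel_scope.cancel()      # unblock sleep_forever() so the group exits

anyio.run(main)
```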
from __future__ import annotations\n\nimport types\nfrom abc import ABCMeta, abstractmethod\nfrom collections.abc import AsyncGenerator, Callable, Coroutine, Iterable\nfrom typing import Any, TypeVar\n\n_T = TypeVar("_T")\n\n\nclass TestRunner(metaclass=ABCMeta):\n """\n Encapsulates a running event loop. Every call made through this object will use the\n same event loop.\n """\n\n def __enter__(self) -> TestRunner:\n return self\n\n @abstractmethod\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: types.TracebackType | None,\n ) -> bool | None: ...\n\n @abstractmethod\n def run_asyncgen_fixture(\n self,\n fixture_func: Callable[..., AsyncGenerator[_T, Any]],\n kwargs: dict[str, Any],\n ) -> Iterable[_T]:\n """\n Run an async generator fixture.\n\n :param fixture_func: the fixture function\n :param kwargs: keyword arguments to call the fixture function with\n :return: an iterator yielding the value yielded from the async generator\n """\n\n @abstractmethod\n def run_fixture(\n self,\n fixture_func: Callable[..., Coroutine[Any, Any, _T]],\n kwargs: dict[str, Any],\n ) -> _T:\n """\n Run an async fixture.\n\n :param fixture_func: the fixture function\n :param kwargs: keyword arguments to call the fixture function with\n :return: the return value of the fixture function\n """\n\n @abstractmethod\n def run_test(\n self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]\n ) -> None:\n """\n Run an async test function.\n\n :param test_func: the test function\n :param kwargs: keyword arguments to call the test function with\n """\n
|
.venv\Lib\site-packages\anyio\abc\_testing.py
|
_testing.py
|
Python
| 1,821 | 0.85 | 0.215385 | 0 |
awesome-app
| 445 |
2024-09-08T16:52:02.392341
|
Apache-2.0
| true |
8dd006396d407f0e7ab0e8223a0888d4
|
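TestRunner is what anyio's pytest plugin drives internally; user code rarely touches it directly. Purely as a sketch of the intended call pattern, and assuming the runner is obtained from the backend class (a private API that may differ between anyio versions):

```python
from anyio._core._eventloop import get_async_backend  # private API: assumption

async def test_addition() -> None:
    assert 1 + 1 == 2

backend = get_async_backend("asyncio")
with backend.create_test_runner({}) as runner:  # one event loop for every call
    runner.run_test(test_addition, {})
```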
from __future__ import annotations\n\nfrom ._eventloop import AsyncBackend as AsyncBackend\nfrom ._resources import AsyncResource as AsyncResource\nfrom ._sockets import ConnectedUDPSocket as ConnectedUDPSocket\nfrom ._sockets import ConnectedUNIXDatagramSocket as ConnectedUNIXDatagramSocket\nfrom ._sockets import IPAddressType as IPAddressType\nfrom ._sockets import IPSockAddrType as IPSockAddrType\nfrom ._sockets import SocketAttribute as SocketAttribute\nfrom ._sockets import SocketListener as SocketListener\nfrom ._sockets import SocketStream as SocketStream\nfrom ._sockets import UDPPacketType as UDPPacketType\nfrom ._sockets import UDPSocket as UDPSocket\nfrom ._sockets import UNIXDatagramPacketType as UNIXDatagramPacketType\nfrom ._sockets import UNIXDatagramSocket as UNIXDatagramSocket\nfrom ._sockets import UNIXSocketStream as UNIXSocketStream\nfrom ._streams import AnyByteReceiveStream as AnyByteReceiveStream\nfrom ._streams import AnyByteSendStream as AnyByteSendStream\nfrom ._streams import AnyByteStream as AnyByteStream\nfrom ._streams import AnyUnreliableByteReceiveStream as AnyUnreliableByteReceiveStream\nfrom ._streams import AnyUnreliableByteSendStream as AnyUnreliableByteSendStream\nfrom ._streams import AnyUnreliableByteStream as AnyUnreliableByteStream\nfrom ._streams import ByteReceiveStream as ByteReceiveStream\nfrom ._streams import ByteSendStream as ByteSendStream\nfrom ._streams import ByteStream as ByteStream\nfrom ._streams import Listener as Listener\nfrom ._streams import ObjectReceiveStream as ObjectReceiveStream\nfrom ._streams import ObjectSendStream as ObjectSendStream\nfrom ._streams import ObjectStream as ObjectStream\nfrom ._streams import UnreliableObjectReceiveStream as UnreliableObjectReceiveStream\nfrom ._streams import UnreliableObjectSendStream as UnreliableObjectSendStream\nfrom ._streams import UnreliableObjectStream as UnreliableObjectStream\nfrom ._subprocesses import Process as Process\nfrom ._tasks import TaskGroup as TaskGroup\nfrom ._tasks import TaskStatus as TaskStatus\nfrom ._testing import TestRunner as TestRunner\n\n# Re-exported here, for backwards compatibility\n# isort: off\nfrom .._core._synchronization import (\n CapacityLimiter as CapacityLimiter,\n Condition as Condition,\n Event as Event,\n Lock as Lock,\n Semaphore as Semaphore,\n)\nfrom .._core._tasks import CancelScope as CancelScope\nfrom ..from_thread import BlockingPortal as BlockingPortal\n\n# Re-export imports so they look like they live directly in this package\nfor __value in list(locals().values()):\n if getattr(__value, "__module__", "").startswith("anyio.abc."):\n __value.__module__ = __name__\n\ndel __value\n
|
.venv\Lib\site-packages\anyio\abc\__init__.py
|
__init__.py
|
Python
| 2,652 | 0.95 | 0.054545 | 0.058824 |
python-kit
| 386 |
2024-01-04T21:14:49.888296
|
GPL-3.0
| false |
7060ac9ce76d907c20f5e0e8c805fd83
|
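The __module__ rewrite at the bottom is visible in introspection: classes defined in private submodules report the public package instead, so reprs and documentation point users at stable import paths.

```python
import anyio.abc

print(anyio.abc.TaskGroup.__module__)   # "anyio.abc", not "anyio.abc._tasks"
print(anyio.abc.ByteStream.__module__)  # "anyio.abc"
```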
from __future__ import annotations\n\nfrom collections.abc import Callable, Mapping\nfrom dataclasses import dataclass, field\nfrom typing import Any\n\nfrom .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead\nfrom ..abc import AnyByteReceiveStream, ByteReceiveStream\n\n\n@dataclass(eq=False)\nclass BufferedByteReceiveStream(ByteReceiveStream):\n """\n Wraps any bytes-based receive stream and uses a buffer to provide sophisticated\n receiving capabilities in the form of a byte stream.\n """\n\n receive_stream: AnyByteReceiveStream\n _buffer: bytearray = field(init=False, default_factory=bytearray)\n _closed: bool = field(init=False, default=False)\n\n async def aclose(self) -> None:\n await self.receive_stream.aclose()\n self._closed = True\n\n @property\n def buffer(self) -> bytes:\n """The bytes currently in the buffer."""\n return bytes(self._buffer)\n\n @property\n def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:\n return self.receive_stream.extra_attributes\n\n async def receive(self, max_bytes: int = 65536) -> bytes:\n if self._closed:\n raise ClosedResourceError\n\n if self._buffer:\n chunk = bytes(self._buffer[:max_bytes])\n del self._buffer[:max_bytes]\n return chunk\n elif isinstance(self.receive_stream, ByteReceiveStream):\n return await self.receive_stream.receive(max_bytes)\n else:\n # With a bytes-oriented object stream, we need to handle any surplus bytes\n # we get from the receive() call\n chunk = await self.receive_stream.receive()\n if len(chunk) > max_bytes:\n # Save the surplus bytes in the buffer\n self._buffer.extend(chunk[max_bytes:])\n return chunk[:max_bytes]\n else:\n return chunk\n\n async def receive_exactly(self, nbytes: int) -> bytes:\n """\n Read exactly the given amount of bytes from the stream.\n\n :param nbytes: the number of bytes to read\n :return: the bytes read\n :raises ~anyio.IncompleteRead: if the stream was closed before the requested\n amount of bytes could be read from the stream\n\n """\n while True:\n remaining = nbytes - len(self._buffer)\n if remaining <= 0:\n retval = self._buffer[:nbytes]\n del self._buffer[:nbytes]\n return bytes(retval)\n\n try:\n if isinstance(self.receive_stream, ByteReceiveStream):\n chunk = await self.receive_stream.receive(remaining)\n else:\n chunk = await self.receive_stream.receive()\n except EndOfStream as exc:\n raise IncompleteRead from exc\n\n self._buffer.extend(chunk)\n\n async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes:\n """\n Read from the stream until the delimiter is found or max_bytes have been read.\n\n :param delimiter: the marker to look for in the stream\n :param max_bytes: maximum number of bytes that will be read before raising\n :exc:`~anyio.DelimiterNotFound`\n :return: the bytes read (not including the delimiter)\n :raises ~anyio.IncompleteRead: if the stream was closed before the delimiter\n was found\n :raises ~anyio.DelimiterNotFound: if the delimiter is not found within the\n bytes read up to the maximum allowed\n\n """\n delimiter_size = len(delimiter)\n offset = 0\n while True:\n # Check if the delimiter can be found in the current buffer\n index = self._buffer.find(delimiter, offset)\n if index >= 0:\n found = self._buffer[:index]\n del self._buffer[: index + len(delimiter) :]\n return bytes(found)\n\n # Check if the buffer is already at or over the limit\n if len(self._buffer) >= max_bytes:\n raise DelimiterNotFound(max_bytes)\n\n # Read more data into the buffer from the socket\n try:\n data = await 
self.receive_stream.receive()\n except EndOfStream as exc:\n raise IncompleteRead from exc\n\n # Move the offset forward and add the new data to the buffer\n offset = max(len(self._buffer) - delimiter_size + 1, 0)\n self._buffer.extend(data)\n
|
.venv\Lib\site-packages\anyio\streams\buffered.py
|
buffered.py
|
Python
| 4,500 | 0.95 | 0.201681 | 0.071429 |
awesome-app
| 359 |
2024-05-03T14:22:18.903010
|
BSD-3-Clause
| false |
5e0b5884fbe6ded9dab3ae4288fb2fac
|
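A short sketch of the buffered wrapper over a memory object stream, exercising receive_until() and receive_exactly(). The create_memory_object_stream[bytes] subscript requires anyio 4; on anyio 3, drop the subscript:

```python
import anyio
from anyio.streams.buffered import BufferedByteReceiveStream

async def main() -> None:
    send, receive = anyio.create_memory_object_stream[bytes](10)
    buffered = BufferedByteReceiveStream(receive)
    await send.send(b"hello\nworld")
    print(await buffered.receive_until(b"\n", 1024))  # b'hello'
    print(await buffered.receive_exactly(5))          # b'world'
    await send.aclose()
    await buffered.aclose()

anyio.run(main)
```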
from __future__ import annotations\n\nfrom collections.abc import Callable, Mapping\nfrom io import SEEK_SET, UnsupportedOperation\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import Any, BinaryIO, cast\n\nfrom .. import (\n BrokenResourceError,\n ClosedResourceError,\n EndOfStream,\n TypedAttributeSet,\n to_thread,\n typed_attribute,\n)\nfrom ..abc import ByteReceiveStream, ByteSendStream\n\n\nclass FileStreamAttribute(TypedAttributeSet):\n #: the open file descriptor\n file: BinaryIO = typed_attribute()\n #: the path of the file on the file system, if available (file must be a real file)\n path: Path = typed_attribute()\n #: the file number, if available (file must be a real file or a TTY)\n fileno: int = typed_attribute()\n\n\nclass _BaseFileStream:\n def __init__(self, file: BinaryIO):\n self._file = file\n\n async def aclose(self) -> None:\n await to_thread.run_sync(self._file.close)\n\n @property\n def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:\n attributes: dict[Any, Callable[[], Any]] = {\n FileStreamAttribute.file: lambda: self._file,\n }\n\n if hasattr(self._file, "name"):\n attributes[FileStreamAttribute.path] = lambda: Path(self._file.name)\n\n try:\n self._file.fileno()\n except UnsupportedOperation:\n pass\n else:\n attributes[FileStreamAttribute.fileno] = lambda: self._file.fileno()\n\n return attributes\n\n\nclass FileReadStream(_BaseFileStream, ByteReceiveStream):\n """\n A byte stream that reads from a file in the file system.\n\n :param file: a file that has been opened for reading in binary mode\n\n .. versionadded:: 3.0\n """\n\n @classmethod\n async def from_path(cls, path: str | PathLike[str]) -> FileReadStream:\n """\n Create a file read stream by opening the given file.\n\n :param path: path of the file to read from\n\n """\n file = await to_thread.run_sync(Path(path).open, "rb")\n return cls(cast(BinaryIO, file))\n\n async def receive(self, max_bytes: int = 65536) -> bytes:\n try:\n data = await to_thread.run_sync(self._file.read, max_bytes)\n except ValueError:\n raise ClosedResourceError from None\n except OSError as exc:\n raise BrokenResourceError from exc\n\n if data:\n return data\n else:\n raise EndOfStream\n\n async def seek(self, position: int, whence: int = SEEK_SET) -> int:\n """\n Seek the file to the given position.\n\n .. seealso:: :meth:`io.IOBase.seek`\n\n .. note:: Not all file descriptors are seekable.\n\n :param position: position to seek the file to\n :param whence: controls how ``position`` is interpreted\n :return: the new absolute position\n :raises OSError: if the file is not seekable\n\n """\n return await to_thread.run_sync(self._file.seek, position, whence)\n\n async def tell(self) -> int:\n """\n Return the current stream position.\n\n .. note:: Not all file descriptors are seekable.\n\n :return: the current absolute position\n :raises OSError: if the file is not seekable\n\n """\n return await to_thread.run_sync(self._file.tell)\n\n\nclass FileWriteStream(_BaseFileStream, ByteSendStream):\n """\n A byte stream that writes to a file in the file system.\n\n :param file: a file that has been opened for writing in binary mode\n\n .. 
versionadded:: 3.0\n """\n\n @classmethod\n async def from_path(\n cls, path: str | PathLike[str], append: bool = False\n ) -> FileWriteStream:\n """\n Create a file write stream by opening the given file for writing.\n\n :param path: path of the file to write to\n :param append: if ``True``, open the file for appending; if ``False``, any\n existing file at the given path will be truncated\n\n """\n mode = "ab" if append else "wb"\n file = await to_thread.run_sync(Path(path).open, mode)\n return cls(cast(BinaryIO, file))\n\n async def send(self, item: bytes) -> None:\n try:\n await to_thread.run_sync(self._file.write, item)\n except ValueError:\n raise ClosedResourceError from None\n except OSError as exc:\n raise BrokenResourceError from exc\n
|
.venv\Lib\site-packages\anyio\streams\file.py
|
file.py
|
Python
| 4,383 | 0.95 | 0.195946 | 0.027027 |
vue-tools
| 993 |
2025-04-01T15:08:18.335901
|
Apache-2.0
| false |
5deb64edb8a243176cc1095d34dd7f1e
|
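Both classes are normally constructed through their from_path() classmethods, which open the file in a worker thread. A minimal round-trip sketch; it writes demo.txt in the working directory (an illustrative path):

```python
import anyio
from anyio.streams.file import FileReadStream, FileWriteStream

async def main() -> None:
    async with await FileWriteStream.from_path("demo.txt") as out:
        await out.send(b"hello file streams\n")

    async with await FileReadStream.from_path("demo.txt") as src:
        async for chunk in src:  # iterates until EndOfStream
            print(chunk)

anyio.run(main)
```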
from __future__ import annotations\n\nimport warnings\nfrom collections import OrderedDict, deque\nfrom dataclasses import dataclass, field\nfrom types import TracebackType\nfrom typing import Generic, NamedTuple, TypeVar\n\nfrom .. import (\n BrokenResourceError,\n ClosedResourceError,\n EndOfStream,\n WouldBlock,\n)\nfrom .._core._testing import TaskInfo, get_current_task\nfrom ..abc import Event, ObjectReceiveStream, ObjectSendStream\nfrom ..lowlevel import checkpoint\n\nT_Item = TypeVar("T_Item")\nT_co = TypeVar("T_co", covariant=True)\nT_contra = TypeVar("T_contra", contravariant=True)\n\n\nclass MemoryObjectStreamStatistics(NamedTuple):\n current_buffer_used: int #: number of items stored in the buffer\n #: maximum number of items that can be stored on this stream (or :data:`math.inf`)\n max_buffer_size: float\n open_send_streams: int #: number of unclosed clones of the send stream\n open_receive_streams: int #: number of unclosed clones of the receive stream\n #: number of tasks blocked on :meth:`MemoryObjectSendStream.send`\n tasks_waiting_send: int\n #: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive`\n tasks_waiting_receive: int\n\n\n@dataclass(eq=False)\nclass MemoryObjectItemReceiver(Generic[T_Item]):\n task_info: TaskInfo = field(init=False, default_factory=get_current_task)\n item: T_Item = field(init=False)\n\n def __repr__(self) -> str:\n # When item is not defined, we get following error with default __repr__:\n # AttributeError: 'MemoryObjectItemReceiver' object has no attribute 'item'\n item = getattr(self, "item", None)\n return f"{self.__class__.__name__}(task_info={self.task_info}, item={item!r})"\n\n\n@dataclass(eq=False)\nclass MemoryObjectStreamState(Generic[T_Item]):\n max_buffer_size: float = field()\n buffer: deque[T_Item] = field(init=False, default_factory=deque)\n open_send_channels: int = field(init=False, default=0)\n open_receive_channels: int = field(init=False, default=0)\n waiting_receivers: OrderedDict[Event, MemoryObjectItemReceiver[T_Item]] = field(\n init=False, default_factory=OrderedDict\n )\n waiting_senders: OrderedDict[Event, T_Item] = field(\n init=False, default_factory=OrderedDict\n )\n\n def statistics(self) -> MemoryObjectStreamStatistics:\n return MemoryObjectStreamStatistics(\n len(self.buffer),\n self.max_buffer_size,\n self.open_send_channels,\n self.open_receive_channels,\n len(self.waiting_senders),\n len(self.waiting_receivers),\n )\n\n\n@dataclass(eq=False)\nclass MemoryObjectReceiveStream(Generic[T_co], ObjectReceiveStream[T_co]):\n _state: MemoryObjectStreamState[T_co]\n _closed: bool = field(init=False, default=False)\n\n def __post_init__(self) -> None:\n self._state.open_receive_channels += 1\n\n def receive_nowait(self) -> T_co:\n """\n Receive the next item if it can be done without waiting.\n\n :return: the received item\n :raises ~anyio.ClosedResourceError: if this send stream has been closed\n :raises ~anyio.EndOfStream: if the buffer is empty and this stream has been\n closed from the sending end\n :raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks\n waiting to send\n\n """\n if self._closed:\n raise ClosedResourceError\n\n if self._state.waiting_senders:\n # Get the item from the next sender\n send_event, item = self._state.waiting_senders.popitem(last=False)\n self._state.buffer.append(item)\n send_event.set()\n\n if self._state.buffer:\n return self._state.buffer.popleft()\n elif not self._state.open_send_channels:\n raise EndOfStream\n\n raise WouldBlock\n\n async def 
receive(self) -> T_co:\n await checkpoint()\n try:\n return self.receive_nowait()\n except WouldBlock:\n # Add ourselves in the queue\n receive_event = Event()\n receiver = MemoryObjectItemReceiver[T_co]()\n self._state.waiting_receivers[receive_event] = receiver\n\n try:\n await receive_event.wait()\n finally:\n self._state.waiting_receivers.pop(receive_event, None)\n\n try:\n return receiver.item\n except AttributeError:\n raise EndOfStream from None\n\n def clone(self) -> MemoryObjectReceiveStream[T_co]:\n """\n Create a clone of this receive stream.\n\n Each clone can be closed separately. Only when all clones have been closed will\n the receiving end of the memory stream be considered closed by the sending ends.\n\n :return: the cloned stream\n\n """\n if self._closed:\n raise ClosedResourceError\n\n return MemoryObjectReceiveStream(_state=self._state)\n\n def close(self) -> None:\n """\n Close the stream.\n\n This works the exact same way as :meth:`aclose`, but is provided as a special\n case for the benefit of synchronous callbacks.\n\n """\n if not self._closed:\n self._closed = True\n self._state.open_receive_channels -= 1\n if self._state.open_receive_channels == 0:\n send_events = list(self._state.waiting_senders.keys())\n for event in send_events:\n event.set()\n\n async def aclose(self) -> None:\n self.close()\n\n def statistics(self) -> MemoryObjectStreamStatistics:\n """\n Return statistics about the current state of this stream.\n\n .. versionadded:: 3.0\n """\n return self._state.statistics()\n\n def __enter__(self) -> MemoryObjectReceiveStream[T_co]:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> None:\n self.close()\n\n def __del__(self) -> None:\n if not self._closed:\n warnings.warn(\n f"Unclosed <{self.__class__.__name__} at {id(self):x}>",\n ResourceWarning,\n source=self,\n )\n\n\n@dataclass(eq=False)\nclass MemoryObjectSendStream(Generic[T_contra], ObjectSendStream[T_contra]):\n _state: MemoryObjectStreamState[T_contra]\n _closed: bool = field(init=False, default=False)\n\n def __post_init__(self) -> None:\n self._state.open_send_channels += 1\n\n def send_nowait(self, item: T_contra) -> None:\n """\n Send an item immediately if it can be done without waiting.\n\n :param item: the item to send\n :raises ~anyio.ClosedResourceError: if this send stream has been closed\n :raises ~anyio.BrokenResourceError: if the stream has been closed from the\n receiving end\n :raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting\n to receive\n\n """\n if self._closed:\n raise ClosedResourceError\n if not self._state.open_receive_channels:\n raise BrokenResourceError\n\n while self._state.waiting_receivers:\n receive_event, receiver = self._state.waiting_receivers.popitem(last=False)\n if not receiver.task_info.has_pending_cancellation():\n receiver.item = item\n receive_event.set()\n return\n\n if len(self._state.buffer) < self._state.max_buffer_size:\n self._state.buffer.append(item)\n else:\n raise WouldBlock\n\n async def send(self, item: T_contra) -> None:\n """\n Send an item to the stream.\n\n If the buffer is full, this method blocks until there is again room in the\n buffer or the item can be sent directly to a receiver.\n\n :param item: the item to send\n :raises ~anyio.ClosedResourceError: if this send stream has been closed\n :raises ~anyio.BrokenResourceError: if the stream has been closed from the\n receiving end\n\n """\n await 
checkpoint()\n try:\n self.send_nowait(item)\n except WouldBlock:\n # Wait until there's someone on the receiving end\n send_event = Event()\n self._state.waiting_senders[send_event] = item\n try:\n await send_event.wait()\n except BaseException:\n self._state.waiting_senders.pop(send_event, None)\n raise\n\n if send_event in self._state.waiting_senders:\n del self._state.waiting_senders[send_event]\n raise BrokenResourceError from None\n\n def clone(self) -> MemoryObjectSendStream[T_contra]:\n """\n Create a clone of this send stream.\n\n Each clone can be closed separately. Only when all clones have been closed will\n the sending end of the memory stream be considered closed by the receiving ends.\n\n :return: the cloned stream\n\n """\n if self._closed:\n raise ClosedResourceError\n\n return MemoryObjectSendStream(_state=self._state)\n\n def close(self) -> None:\n """\n Close the stream.\n\n This works the exact same way as :meth:`aclose`, but is provided as a special\n case for the benefit of synchronous callbacks.\n\n """\n if not self._closed:\n self._closed = True\n self._state.open_send_channels -= 1\n if self._state.open_send_channels == 0:\n receive_events = list(self._state.waiting_receivers.keys())\n self._state.waiting_receivers.clear()\n for event in receive_events:\n event.set()\n\n async def aclose(self) -> None:\n self.close()\n\n def statistics(self) -> MemoryObjectStreamStatistics:\n """\n Return statistics about the current state of this stream.\n\n .. versionadded:: 3.0\n """\n return self._state.statistics()\n\n def __enter__(self) -> MemoryObjectSendStream[T_contra]:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> None:\n self.close()\n\n def __del__(self) -> None:\n if not self._closed:\n warnings.warn(\n f"Unclosed <{self.__class__.__name__} at {id(self):x}>",\n ResourceWarning,\n source=self,\n )\n
|
.venv\Lib\site-packages\anyio\streams\memory.py
|
memory.py
|
Python
| 10,560 | 0.95 | 0.198738 | 0.031621 |
awesome-app
| 640 |
2023-09-18T10:54:24.992166
|
GPL-3.0
| false |
2101b046f9c2bb28cfe9c6ff888a19c8
|
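A typical producer/consumer sketch: with max_buffer_size=0 every send() rendezvouses directly with a receive(), and closing the last send clone ends the consumer's async for loop via EndOfStream:

```python
import anyio
from anyio.streams.memory import MemoryObjectSendStream

async def producer(send: MemoryObjectSendStream[int]) -> None:
    async with send:  # close our clone when done
        for i in range(3):
            await send.send(i)

async def main() -> None:
    send, receive = anyio.create_memory_object_stream(0)  # unbuffered
    async with anyio.create_task_group() as tg:
        tg.start_soon(producer, send)
        async with receive:
            async for item in receive:  # stops once all senders are closed
                print(item)

anyio.run(main)
```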
from __future__ import annotations\n\nfrom collections.abc import Callable, Mapping, Sequence\nfrom dataclasses import dataclass\nfrom typing import Any, Generic, TypeVar\n\nfrom ..abc import (\n ByteReceiveStream,\n ByteSendStream,\n ByteStream,\n Listener,\n ObjectReceiveStream,\n ObjectSendStream,\n ObjectStream,\n TaskGroup,\n)\n\nT_Item = TypeVar("T_Item")\nT_Stream = TypeVar("T_Stream")\n\n\n@dataclass(eq=False)\nclass StapledByteStream(ByteStream):\n """\n Combines two byte streams into a single, bidirectional byte stream.\n\n Extra attributes will be provided from both streams, with the receive stream\n providing the values in case of a conflict.\n\n :param ByteSendStream send_stream: the sending byte stream\n :param ByteReceiveStream receive_stream: the receiving byte stream\n """\n\n send_stream: ByteSendStream\n receive_stream: ByteReceiveStream\n\n async def receive(self, max_bytes: int = 65536) -> bytes:\n return await self.receive_stream.receive(max_bytes)\n\n async def send(self, item: bytes) -> None:\n await self.send_stream.send(item)\n\n async def send_eof(self) -> None:\n await self.send_stream.aclose()\n\n async def aclose(self) -> None:\n await self.send_stream.aclose()\n await self.receive_stream.aclose()\n\n @property\n def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:\n return {\n **self.send_stream.extra_attributes,\n **self.receive_stream.extra_attributes,\n }\n\n\n@dataclass(eq=False)\nclass StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]):\n """\n Combines two object streams into a single, bidirectional object stream.\n\n Extra attributes will be provided from both streams, with the receive stream\n providing the values in case of a conflict.\n\n :param ObjectSendStream send_stream: the sending object stream\n :param ObjectReceiveStream receive_stream: the receiving object stream\n """\n\n send_stream: ObjectSendStream[T_Item]\n receive_stream: ObjectReceiveStream[T_Item]\n\n async def receive(self) -> T_Item:\n return await self.receive_stream.receive()\n\n async def send(self, item: T_Item) -> None:\n await self.send_stream.send(item)\n\n async def send_eof(self) -> None:\n await self.send_stream.aclose()\n\n async def aclose(self) -> None:\n await self.send_stream.aclose()\n await self.receive_stream.aclose()\n\n @property\n def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:\n return {\n **self.send_stream.extra_attributes,\n **self.receive_stream.extra_attributes,\n }\n\n\n@dataclass(eq=False)\nclass MultiListener(Generic[T_Stream], Listener[T_Stream]):\n """\n Combines multiple listeners into one, serving connections from all of them at once.\n\n Any MultiListeners in the given collection of listeners will have their listeners\n moved into this one.\n\n Extra attributes are provided from each listener, with each successive listener\n overriding any conflicting attributes from the previous one.\n\n :param listeners: listeners to serve\n :type listeners: Sequence[Listener[T_Stream]]\n """\n\n listeners: Sequence[Listener[T_Stream]]\n\n def __post_init__(self) -> None:\n listeners: list[Listener[T_Stream]] = []\n for listener in self.listeners:\n if isinstance(listener, MultiListener):\n listeners.extend(listener.listeners)\n del listener.listeners[:] # type: ignore[attr-defined]\n else:\n listeners.append(listener)\n\n self.listeners = listeners\n\n async def serve(\n self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None\n ) -> None:\n from .. 
import create_task_group\n\n async with create_task_group() as tg:\n for listener in self.listeners:\n tg.start_soon(listener.serve, handler, task_group)\n\n async def aclose(self) -> None:\n for listener in self.listeners:\n await listener.aclose()\n\n @property\n def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:\n attributes: dict = {}\n for listener in self.listeners:\n attributes.update(listener.extra_attributes)\n\n return attributes\n
|
.venv\Lib\site-packages\anyio\streams\stapled.py
|
stapled.py
|
Python
| 4,302 | 0.95 | 0.156028 | 0.038095 |
vue-tools
| 195 |
2024-09-07T12:33:54.793916
|
GPL-3.0
| false |
66228731bd182fb136250c1aad54d67d
|
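Stapling a memory send/receive pair back together yields an in-process loopback object stream, a handy trick in tests. A small sketch:

```python
import anyio
from anyio.streams.stapled import StapledObjectStream

async def main() -> None:
    send, receive = anyio.create_memory_object_stream(1)
    loopback = StapledObjectStream(send, receive)  # send_stream, receive_stream
    await loopback.send("ping")
    print(await loopback.receive())  # "ping"
    await loopback.aclose()

anyio.run(main)
```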
from __future__ import annotations\n\nimport codecs\nfrom collections.abc import Callable, Mapping\nfrom dataclasses import InitVar, dataclass, field\nfrom typing import Any\n\nfrom ..abc import (\n AnyByteReceiveStream,\n AnyByteSendStream,\n AnyByteStream,\n ObjectReceiveStream,\n ObjectSendStream,\n ObjectStream,\n)\n\n\n@dataclass(eq=False)\nclass TextReceiveStream(ObjectReceiveStream[str]):\n """\n Stream wrapper that decodes bytes to strings using the given encoding.\n\n Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any\n completely received unicode characters as soon as they come in.\n\n :param transport_stream: any bytes-based receive stream\n :param encoding: character encoding to use for decoding bytes to strings (defaults\n to ``utf-8``)\n :param errors: handling scheme for decoding errors (defaults to ``strict``; see the\n `codecs module documentation`_ for a comprehensive list of options)\n\n .. _codecs module documentation:\n https://docs.python.org/3/library/codecs.html#codec-objects\n """\n\n transport_stream: AnyByteReceiveStream\n encoding: InitVar[str] = "utf-8"\n errors: InitVar[str] = "strict"\n _decoder: codecs.IncrementalDecoder = field(init=False)\n\n def __post_init__(self, encoding: str, errors: str) -> None:\n decoder_class = codecs.getincrementaldecoder(encoding)\n self._decoder = decoder_class(errors=errors)\n\n async def receive(self) -> str:\n while True:\n chunk = await self.transport_stream.receive()\n decoded = self._decoder.decode(chunk)\n if decoded:\n return decoded\n\n async def aclose(self) -> None:\n await self.transport_stream.aclose()\n self._decoder.reset()\n\n @property\n def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:\n return self.transport_stream.extra_attributes\n\n\n@dataclass(eq=False)\nclass TextSendStream(ObjectSendStream[str]):\n """\n Sends strings to the wrapped stream as bytes using the given encoding.\n\n :param AnyByteSendStream transport_stream: any bytes-based send stream\n :param str encoding: character encoding to use for encoding strings to bytes\n (defaults to ``utf-8``)\n :param str errors: handling scheme for encoding errors (defaults to ``strict``; see\n the `codecs module documentation`_ for a comprehensive list of options)\n\n .. 
_codecs module documentation:\n https://docs.python.org/3/library/codecs.html#codec-objects\n """\n\n transport_stream: AnyByteSendStream\n encoding: InitVar[str] = "utf-8"\n errors: str = "strict"\n _encoder: Callable[..., tuple[bytes, int]] = field(init=False)\n\n def __post_init__(self, encoding: str) -> None:\n self._encoder = codecs.getencoder(encoding)\n\n async def send(self, item: str) -> None:\n encoded = self._encoder(item, self.errors)[0]\n await self.transport_stream.send(encoded)\n\n async def aclose(self) -> None:\n await self.transport_stream.aclose()\n\n @property\n def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:\n return self.transport_stream.extra_attributes\n\n\n@dataclass(eq=False)\nclass TextStream(ObjectStream[str]):\n """\n A bidirectional stream that decodes bytes to strings on receive and encodes strings\n to bytes on send.\n\n Extra attributes will be provided from both streams, with the receive stream\n providing the values in case of a conflict.\n\n :param AnyByteStream transport_stream: any bytes-based stream\n :param str encoding: character encoding to use for encoding/decoding strings to/from\n bytes (defaults to ``utf-8``)\n :param str errors: handling scheme for encoding errors (defaults to ``strict``; see\n the `codecs module documentation`_ for a comprehensive list of options)\n\n .. _codecs module documentation:\n https://docs.python.org/3/library/codecs.html#codec-objects\n """\n\n transport_stream: AnyByteStream\n encoding: InitVar[str] = "utf-8"\n errors: InitVar[str] = "strict"\n _receive_stream: TextReceiveStream = field(init=False)\n _send_stream: TextSendStream = field(init=False)\n\n def __post_init__(self, encoding: str, errors: str) -> None:\n self._receive_stream = TextReceiveStream(\n self.transport_stream, encoding=encoding, errors=errors\n )\n self._send_stream = TextSendStream(\n self.transport_stream, encoding=encoding, errors=errors\n )\n\n async def receive(self) -> str:\n return await self._receive_stream.receive()\n\n async def send(self, item: str) -> None:\n await self._send_stream.send(item)\n\n async def send_eof(self) -> None:\n await self.transport_stream.send_eof()\n\n async def aclose(self) -> None:\n await self._send_stream.aclose()\n await self._receive_stream.aclose()\n\n @property\n def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:\n return {\n **self._send_stream.extra_attributes,\n **self._receive_stream.extra_attributes,\n }\n
|
.venv\Lib\site-packages\anyio\streams\text.py
|
text.py
|
Python
| 5,094 | 0.95 | 0.197279 | 0.017544 |
awesome-app
| 644 |
2024-03-06T01:23:05.295403
|
MIT
| false |
608291cc437db1dbe5e4190f21ae9002
|
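Because AnyByteStream includes ObjectStream[bytes], a stapled memory stream can serve as the transport, giving an in-memory round trip through the encoder and decoder. A sketch:

```python
import anyio
from anyio.streams.stapled import StapledObjectStream
from anyio.streams.text import TextStream

async def main() -> None:
    send, receive = anyio.create_memory_object_stream(1)
    text = TextStream(StapledObjectStream(send, receive))
    await text.send("héllo")     # encoded to UTF-8 bytes on the way out
    print(await text.receive())  # decoded back to "héllo"
    await text.aclose()

anyio.run(main)
```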
from __future__ import annotations\n\nimport logging\nimport re\nimport ssl\nimport sys\nfrom collections.abc import Callable, Mapping\nfrom dataclasses import dataclass\nfrom functools import wraps\nfrom typing import Any, TypeVar\n\nfrom .. import (\n BrokenResourceError,\n EndOfStream,\n aclose_forcefully,\n get_cancelled_exc_class,\n to_thread,\n)\nfrom .._core._typedattr import TypedAttributeSet, typed_attribute\nfrom ..abc import AnyByteStream, ByteStream, Listener, TaskGroup\n\nif sys.version_info >= (3, 11):\n from typing import TypeVarTuple, Unpack\nelse:\n from typing_extensions import TypeVarTuple, Unpack\n\nT_Retval = TypeVar("T_Retval")\nPosArgsT = TypeVarTuple("PosArgsT")\n_PCTRTT = tuple[tuple[str, str], ...]\n_PCTRTTT = tuple[_PCTRTT, ...]\n\n\nclass TLSAttribute(TypedAttributeSet):\n """Contains Transport Layer Security related attributes."""\n\n #: the selected ALPN protocol\n alpn_protocol: str | None = typed_attribute()\n #: the channel binding for type ``tls-unique``\n channel_binding_tls_unique: bytes = typed_attribute()\n #: the selected cipher\n cipher: tuple[str, str, int] = typed_attribute()\n #: the peer certificate in dictionary form (see :meth:`ssl.SSLSocket.getpeercert`\n # for more information)\n peer_certificate: None | (dict[str, str | _PCTRTTT | _PCTRTT]) = typed_attribute()\n #: the peer certificate in binary form\n peer_certificate_binary: bytes | None = typed_attribute()\n #: ``True`` if this is the server side of the connection\n server_side: bool = typed_attribute()\n #: ciphers shared by the client during the TLS handshake (``None`` if this is the\n #: client side)\n shared_ciphers: list[tuple[str, str, int]] | None = typed_attribute()\n #: the :class:`~ssl.SSLObject` used for encryption\n ssl_object: ssl.SSLObject = typed_attribute()\n #: ``True`` if this stream does (and expects) a closing TLS handshake when the\n #: stream is being closed\n standard_compatible: bool = typed_attribute()\n #: the TLS protocol version (e.g. ``TLSv1.2``)\n tls_version: str = typed_attribute()\n\n\n@dataclass(eq=False)\nclass TLSStream(ByteStream):\n """\n A stream wrapper that encrypts all sent data and decrypts received data.\n\n This class has no public initializer; use :meth:`wrap` instead.\n All extra attributes from :class:`~TLSAttribute` are supported.\n\n :var AnyByteStream transport_stream: the wrapped stream\n\n """\n\n transport_stream: AnyByteStream\n standard_compatible: bool\n _ssl_object: ssl.SSLObject\n _read_bio: ssl.MemoryBIO\n _write_bio: ssl.MemoryBIO\n\n @classmethod\n async def wrap(\n cls,\n transport_stream: AnyByteStream,\n *,\n server_side: bool | None = None,\n hostname: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n standard_compatible: bool = True,\n ) -> TLSStream:\n """\n Wrap an existing stream with Transport Layer Security.\n\n This performs a TLS handshake with the peer.\n\n :param transport_stream: a bytes-transporting stream to wrap\n :param server_side: ``True`` if this is the server side of the connection,\n ``False`` if this is the client side (if omitted, will be set to ``False``\n if ``hostname`` has been provided, ``False`` otherwise). 
Used only to create\n a default context when an explicit context has not been provided.\n :param hostname: host name of the peer (if host name checking is desired)\n :param ssl_context: the SSLContext object to use (if not provided, a secure\n default will be created)\n :param standard_compatible: if ``False``, skip the closing handshake when\n closing the connection, and don't raise an exception if the peer does the\n same\n :raises ~ssl.SSLError: if the TLS handshake fails\n\n """\n if server_side is None:\n server_side = not hostname\n\n if not ssl_context:\n purpose = (\n ssl.Purpose.CLIENT_AUTH if server_side else ssl.Purpose.SERVER_AUTH\n )\n ssl_context = ssl.create_default_context(purpose)\n\n # Re-enable detection of unexpected EOFs if it was disabled by Python\n if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"):\n ssl_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF\n\n bio_in = ssl.MemoryBIO()\n bio_out = ssl.MemoryBIO()\n\n # External SSLContext implementations may do blocking I/O in wrap_bio(),\n # but the standard library implementation won't\n if type(ssl_context) is ssl.SSLContext:\n ssl_object = ssl_context.wrap_bio(\n bio_in, bio_out, server_side=server_side, server_hostname=hostname\n )\n else:\n ssl_object = await to_thread.run_sync(\n ssl_context.wrap_bio,\n bio_in,\n bio_out,\n server_side,\n hostname,\n None,\n )\n\n wrapper = cls(\n transport_stream=transport_stream,\n standard_compatible=standard_compatible,\n _ssl_object=ssl_object,\n _read_bio=bio_in,\n _write_bio=bio_out,\n )\n await wrapper._call_sslobject_method(ssl_object.do_handshake)\n return wrapper\n\n async def _call_sslobject_method(\n self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]\n ) -> T_Retval:\n while True:\n try:\n result = func(*args)\n except ssl.SSLWantReadError:\n try:\n # Flush any pending writes first\n if self._write_bio.pending:\n await self.transport_stream.send(self._write_bio.read())\n\n data = await self.transport_stream.receive()\n except EndOfStream:\n self._read_bio.write_eof()\n except OSError as exc:\n self._read_bio.write_eof()\n self._write_bio.write_eof()\n raise BrokenResourceError from exc\n else:\n self._read_bio.write(data)\n except ssl.SSLWantWriteError:\n await self.transport_stream.send(self._write_bio.read())\n except ssl.SSLSyscallError as exc:\n self._read_bio.write_eof()\n self._write_bio.write_eof()\n raise BrokenResourceError from exc\n except ssl.SSLError as exc:\n self._read_bio.write_eof()\n self._write_bio.write_eof()\n if isinstance(exc, ssl.SSLEOFError) or (\n exc.strerror and "UNEXPECTED_EOF_WHILE_READING" in exc.strerror\n ):\n if self.standard_compatible:\n raise BrokenResourceError from exc\n else:\n raise EndOfStream from None\n\n raise\n else:\n # Flush any pending writes first\n if self._write_bio.pending:\n await self.transport_stream.send(self._write_bio.read())\n\n return result\n\n async def unwrap(self) -> tuple[AnyByteStream, bytes]:\n """\n Does the TLS closing handshake.\n\n :return: a tuple of (wrapped byte stream, bytes left in the read buffer)\n\n """\n await self._call_sslobject_method(self._ssl_object.unwrap)\n self._read_bio.write_eof()\n self._write_bio.write_eof()\n return self.transport_stream, self._read_bio.read()\n\n async def aclose(self) -> None:\n if self.standard_compatible:\n try:\n await self.unwrap()\n except BaseException:\n await aclose_forcefully(self.transport_stream)\n raise\n\n await self.transport_stream.aclose()\n\n async def receive(self, max_bytes: int = 65536) -> bytes:\n data = await 
self._call_sslobject_method(self._ssl_object.read, max_bytes)\n if not data:\n raise EndOfStream\n\n return data\n\n async def send(self, item: bytes) -> None:\n await self._call_sslobject_method(self._ssl_object.write, item)\n\n async def send_eof(self) -> None:\n tls_version = self.extra(TLSAttribute.tls_version)\n match = re.match(r"TLSv(\d+)(?:\.(\d+))?", tls_version)\n if match:\n major, minor = int(match.group(1)), int(match.group(2) or 0)\n if (major, minor) < (1, 3):\n raise NotImplementedError(\n f"send_eof() requires at least TLSv1.3; current "\n f"session uses {tls_version}"\n )\n\n raise NotImplementedError(\n "send_eof() has not yet been implemented for TLS streams"\n )\n\n @property\n def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:\n return {\n **self.transport_stream.extra_attributes,\n TLSAttribute.alpn_protocol: self._ssl_object.selected_alpn_protocol,\n TLSAttribute.channel_binding_tls_unique: (\n self._ssl_object.get_channel_binding\n ),\n TLSAttribute.cipher: self._ssl_object.cipher,\n TLSAttribute.peer_certificate: lambda: self._ssl_object.getpeercert(False),\n TLSAttribute.peer_certificate_binary: lambda: self._ssl_object.getpeercert(\n True\n ),\n TLSAttribute.server_side: lambda: self._ssl_object.server_side,\n TLSAttribute.shared_ciphers: lambda: self._ssl_object.shared_ciphers()\n if self._ssl_object.server_side\n else None,\n TLSAttribute.standard_compatible: lambda: self.standard_compatible,\n TLSAttribute.ssl_object: lambda: self._ssl_object,\n TLSAttribute.tls_version: self._ssl_object.version,\n }\n\n\n@dataclass(eq=False)\nclass TLSListener(Listener[TLSStream]):\n """\n A convenience listener that wraps another listener and auto-negotiates a TLS session\n on every accepted connection.\n\n If the TLS handshake times out or raises an exception,\n :meth:`handle_handshake_error` is called to do whatever post-mortem processing is\n deemed necessary.\n\n Supports only the :attr:`~TLSAttribute.standard_compatible` extra attribute.\n\n :param Listener listener: the listener to wrap\n :param ssl_context: the SSL context object\n :param standard_compatible: a flag passed through to :meth:`TLSStream.wrap`\n :param handshake_timeout: time limit for the TLS handshake\n (passed to :func:`~anyio.fail_after`)\n """\n\n listener: Listener[Any]\n ssl_context: ssl.SSLContext\n standard_compatible: bool = True\n handshake_timeout: float = 30\n\n @staticmethod\n async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None:\n """\n Handle an exception raised during the TLS handshake.\n\n This method does 3 things:\n\n #. Forcefully closes the original stream\n #. Logs the exception (unless it was a cancellation exception) using the\n ``anyio.streams.tls`` logger\n #. Reraises the exception if it was a base exception or a cancellation exception\n\n :param exc: the exception\n :param stream: the original stream\n\n """\n await aclose_forcefully(stream)\n\n # Log all except cancellation exceptions\n if not isinstance(exc, get_cancelled_exc_class()):\n # CPython (as of 3.11.5) returns incorrect `sys.exc_info()` here when using\n # any asyncio implementation, so we explicitly pass the exception to log\n # (https://github.com/python/cpython/issues/108668). 
Trio does not have this\n # issue because it works around the CPython bug.\n logging.getLogger(__name__).exception(\n "Error during TLS handshake", exc_info=exc\n )\n\n # Only reraise base exceptions and cancellation exceptions\n if not isinstance(exc, Exception) or isinstance(exc, get_cancelled_exc_class()):\n raise\n\n async def serve(\n self,\n handler: Callable[[TLSStream], Any],\n task_group: TaskGroup | None = None,\n ) -> None:\n @wraps(handler)\n async def handler_wrapper(stream: AnyByteStream) -> None:\n from .. import fail_after\n\n try:\n with fail_after(self.handshake_timeout):\n wrapped_stream = await TLSStream.wrap(\n stream,\n ssl_context=self.ssl_context,\n standard_compatible=self.standard_compatible,\n )\n except BaseException as exc:\n await self.handle_handshake_error(exc, stream)\n else:\n await handler(wrapped_stream)\n\n await self.listener.serve(handler_wrapper, task_group)\n\n async def aclose(self) -> None:\n await self.listener.aclose()\n\n @property\n def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:\n return {\n TLSAttribute.standard_compatible: lambda: self.standard_compatible,\n }\n
|
.venv\Lib\site-packages\anyio\streams\tls.py
|
tls.py
|
Python
| 13,199 | 0.95 | 0.170455 | 0.097643 |
awesome-app
| 761 |
2023-10-12T10:00:47.851272
|
BSD-3-Clause
| false |
938577f69784afc6be5a77db3eb1a036
|
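Client-side use is two steps: open a TCP stream, then hand it to TLSStream.wrap() (anyio.connect_tcp() can also do both in one call via its tls parameters). A sketch that assumes outbound network access to example.com:

```python
import anyio
from anyio.streams.tls import TLSAttribute, TLSStream

async def main() -> None:
    tcp = await anyio.connect_tcp("example.com", 443)
    tls = await TLSStream.wrap(tcp, hostname="example.com")  # client side
    print(tls.extra(TLSAttribute.tls_version))  # e.g. "TLSv1.3"
    await tls.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    print((await tls.receive())[:40])
    await tls.aclose()

anyio.run(main)
```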
from __future__ import annotations\n\nimport array\nimport math\nimport os\nimport socket\nimport sys\nimport types\nimport weakref\nfrom collections.abc import (\n AsyncGenerator,\n AsyncIterator,\n Awaitable,\n Callable,\n Collection,\n Coroutine,\n Iterable,\n Sequence,\n)\nfrom concurrent.futures import Future\nfrom contextlib import AbstractContextManager\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom io import IOBase\nfrom os import PathLike\nfrom signal import Signals\nfrom socket import AddressFamily, SocketKind\nfrom types import TracebackType\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n Generic,\n NoReturn,\n TypeVar,\n cast,\n overload,\n)\n\nimport trio.from_thread\nimport trio.lowlevel\nfrom outcome import Error, Outcome, Value\nfrom trio.lowlevel import (\n current_root_task,\n current_task,\n wait_readable,\n wait_writable,\n)\nfrom trio.socket import SocketType as TrioSocketType\nfrom trio.to_thread import run_sync\n\nfrom .. import (\n CapacityLimiterStatistics,\n EventStatistics,\n LockStatistics,\n TaskInfo,\n WouldBlock,\n abc,\n)\nfrom .._core._eventloop import claim_worker_thread\nfrom .._core._exceptions import (\n BrokenResourceError,\n BusyResourceError,\n ClosedResourceError,\n EndOfStream,\n)\nfrom .._core._sockets import convert_ipv6_sockaddr\nfrom .._core._streams import create_memory_object_stream\nfrom .._core._synchronization import (\n CapacityLimiter as BaseCapacityLimiter,\n)\nfrom .._core._synchronization import Event as BaseEvent\nfrom .._core._synchronization import Lock as BaseLock\nfrom .._core._synchronization import (\n ResourceGuard,\n SemaphoreStatistics,\n)\nfrom .._core._synchronization import Semaphore as BaseSemaphore\nfrom .._core._tasks import CancelScope as BaseCancelScope\nfrom ..abc import IPSockAddrType, UDPPacketType, UNIXDatagramPacketType\nfrom ..abc._eventloop import AsyncBackend, StrOrBytesPath\nfrom ..streams.memory import MemoryObjectSendStream\n\nif TYPE_CHECKING:\n from _typeshed import HasFileno\n\nif sys.version_info >= (3, 10):\n from typing import ParamSpec\nelse:\n from typing_extensions import ParamSpec\n\nif sys.version_info >= (3, 11):\n from typing import TypeVarTuple, Unpack\nelse:\n from exceptiongroup import BaseExceptionGroup\n from typing_extensions import TypeVarTuple, Unpack\n\nT = TypeVar("T")\nT_Retval = TypeVar("T_Retval")\nT_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType)\nPosArgsT = TypeVarTuple("PosArgsT")\nP = ParamSpec("P")\n\n\n#\n# Event loop\n#\n\nRunVar = trio.lowlevel.RunVar\n\n\n#\n# Timeouts and cancellation\n#\n\n\nclass CancelScope(BaseCancelScope):\n def __new__(\n cls, original: trio.CancelScope | None = None, **kwargs: object\n ) -> CancelScope:\n return object.__new__(cls)\n\n def __init__(self, original: trio.CancelScope | None = None, **kwargs: Any) -> None:\n self.__original = original or trio.CancelScope(**kwargs)\n\n def __enter__(self) -> CancelScope:\n self.__original.__enter__()\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> bool:\n return self.__original.__exit__(exc_type, exc_val, exc_tb)\n\n def cancel(self) -> None:\n self.__original.cancel()\n\n @property\n def deadline(self) -> float:\n return self.__original.deadline\n\n @deadline.setter\n def deadline(self, value: float) -> None:\n self.__original.deadline = value\n\n @property\n def cancel_called(self) -> bool:\n return self.__original.cancel_called\n\n @property\n def 
cancelled_caught(self) -> bool:\n return self.__original.cancelled_caught\n\n @property\n def shield(self) -> bool:\n return self.__original.shield\n\n @shield.setter\n def shield(self, value: bool) -> None:\n self.__original.shield = value\n\n\n#\n# Task groups\n#\n\n\nclass TaskGroup(abc.TaskGroup):\n def __init__(self) -> None:\n self._active = False\n self._nursery_manager = trio.open_nursery(strict_exception_groups=True)\n self.cancel_scope = None # type: ignore[assignment]\n\n async def __aenter__(self) -> TaskGroup:\n self._active = True\n self._nursery = await self._nursery_manager.__aenter__()\n self.cancel_scope = CancelScope(self._nursery.cancel_scope)\n return self\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> bool:\n try:\n # trio.Nursery.__exit__ returns bool; .open_nursery has wrong type\n return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb) # type: ignore[return-value]\n except BaseExceptionGroup as exc:\n if not exc.split(trio.Cancelled)[1]:\n raise trio.Cancelled._create() from exc\n\n raise\n finally:\n del exc_val, exc_tb\n self._active = False\n\n def start_soon(\n self,\n func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],\n *args: Unpack[PosArgsT],\n name: object = None,\n ) -> None:\n if not self._active:\n raise RuntimeError(\n "This task group is not active; no new tasks can be started."\n )\n\n self._nursery.start_soon(func, *args, name=name)\n\n async def start(\n self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None\n ) -> Any:\n if not self._active:\n raise RuntimeError(\n "This task group is not active; no new tasks can be started."\n )\n\n return await self._nursery.start(func, *args, name=name)\n\n\n#\n# Threads\n#\n\n\nclass BlockingPortal(abc.BlockingPortal):\n def __new__(cls) -> BlockingPortal:\n return object.__new__(cls)\n\n def __init__(self) -> None:\n super().__init__()\n self._token = trio.lowlevel.current_trio_token()\n\n def _spawn_task_from_thread(\n self,\n func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],\n args: tuple[Unpack[PosArgsT]],\n kwargs: dict[str, Any],\n name: object,\n future: Future[T_Retval],\n ) -> None:\n trio.from_thread.run_sync(\n partial(self._task_group.start_soon, name=name),\n self._call_func,\n func,\n args,\n kwargs,\n future,\n trio_token=self._token,\n )\n\n\n#\n# Subprocesses\n#\n\n\n@dataclass(eq=False)\nclass ReceiveStreamWrapper(abc.ByteReceiveStream):\n _stream: trio.abc.ReceiveStream\n\n async def receive(self, max_bytes: int | None = None) -> bytes:\n try:\n data = await self._stream.receive_some(max_bytes)\n except trio.ClosedResourceError as exc:\n raise ClosedResourceError from exc.__cause__\n except trio.BrokenResourceError as exc:\n raise BrokenResourceError from exc.__cause__\n\n if data:\n return data\n else:\n raise EndOfStream\n\n async def aclose(self) -> None:\n await self._stream.aclose()\n\n\n@dataclass(eq=False)\nclass SendStreamWrapper(abc.ByteSendStream):\n _stream: trio.abc.SendStream\n\n async def send(self, item: bytes) -> None:\n try:\n await self._stream.send_all(item)\n except trio.ClosedResourceError as exc:\n raise ClosedResourceError from exc.__cause__\n except trio.BrokenResourceError as exc:\n raise BrokenResourceError from exc.__cause__\n\n async def aclose(self) -> None:\n await self._stream.aclose()\n\n\n@dataclass(eq=False)\nclass Process(abc.Process):\n _process: trio.Process\n _stdin: abc.ByteSendStream | None\n 
_stdout: abc.ByteReceiveStream | None\n _stderr: abc.ByteReceiveStream | None\n\n async def aclose(self) -> None:\n with CancelScope(shield=True):\n if self._stdin:\n await self._stdin.aclose()\n if self._stdout:\n await self._stdout.aclose()\n if self._stderr:\n await self._stderr.aclose()\n\n try:\n await self.wait()\n except BaseException:\n self.kill()\n with CancelScope(shield=True):\n await self.wait()\n raise\n\n async def wait(self) -> int:\n return await self._process.wait()\n\n def terminate(self) -> None:\n self._process.terminate()\n\n def kill(self) -> None:\n self._process.kill()\n\n def send_signal(self, signal: Signals) -> None:\n self._process.send_signal(signal)\n\n @property\n def pid(self) -> int:\n return self._process.pid\n\n @property\n def returncode(self) -> int | None:\n return self._process.returncode\n\n @property\n def stdin(self) -> abc.ByteSendStream | None:\n return self._stdin\n\n @property\n def stdout(self) -> abc.ByteReceiveStream | None:\n return self._stdout\n\n @property\n def stderr(self) -> abc.ByteReceiveStream | None:\n return self._stderr\n\n\nclass _ProcessPoolShutdownInstrument(trio.abc.Instrument):\n def after_run(self) -> None:\n super().after_run()\n\n\ncurrent_default_worker_process_limiter: trio.lowlevel.RunVar = RunVar(\n "current_default_worker_process_limiter"\n)\n\n\nasync def _shutdown_process_pool(workers: set[abc.Process]) -> None:\n try:\n await trio.sleep(math.inf)\n except trio.Cancelled:\n for process in workers:\n if process.returncode is None:\n process.kill()\n\n with CancelScope(shield=True):\n for process in workers:\n await process.aclose()\n\n\n#\n# Sockets and networking\n#\n\n\nclass _TrioSocketMixin(Generic[T_SockAddr]):\n def __init__(self, trio_socket: TrioSocketType) -> None:\n self._trio_socket = trio_socket\n self._closed = False\n\n def _check_closed(self) -> None:\n if self._closed:\n raise ClosedResourceError\n if self._trio_socket.fileno() < 0:\n raise BrokenResourceError\n\n @property\n def _raw_socket(self) -> socket.socket:\n return self._trio_socket._sock # type: ignore[attr-defined]\n\n async def aclose(self) -> None:\n if self._trio_socket.fileno() >= 0:\n self._closed = True\n self._trio_socket.close()\n\n def _convert_socket_error(self, exc: BaseException) -> NoReturn:\n if isinstance(exc, trio.ClosedResourceError):\n raise ClosedResourceError from exc\n elif self._trio_socket.fileno() < 0 and self._closed:\n raise ClosedResourceError from None\n elif isinstance(exc, OSError):\n raise BrokenResourceError from exc\n else:\n raise exc\n\n\nclass SocketStream(_TrioSocketMixin, abc.SocketStream):\n def __init__(self, trio_socket: TrioSocketType) -> None:\n super().__init__(trio_socket)\n self._receive_guard = ResourceGuard("reading from")\n self._send_guard = ResourceGuard("writing to")\n\n async def receive(self, max_bytes: int = 65536) -> bytes:\n with self._receive_guard:\n try:\n data = await self._trio_socket.recv(max_bytes)\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n if data:\n return data\n else:\n raise EndOfStream\n\n async def send(self, item: bytes) -> None:\n with self._send_guard:\n view = memoryview(item)\n while view:\n try:\n bytes_sent = await self._trio_socket.send(view)\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n view = view[bytes_sent:]\n\n async def send_eof(self) -> None:\n self._trio_socket.shutdown(socket.SHUT_WR)\n\n\nclass UNIXSocketStream(SocketStream, abc.UNIXSocketStream):\n async def receive_fds(self, msglen: int, maxfds: int) 
-> tuple[bytes, list[int]]:\n if not isinstance(msglen, int) or msglen < 0:\n raise ValueError("msglen must be a non-negative integer")\n if not isinstance(maxfds, int) or maxfds < 1:\n raise ValueError("maxfds must be a positive integer")\n\n fds = array.array("i")\n await trio.lowlevel.checkpoint()\n with self._receive_guard:\n while True:\n try:\n message, ancdata, flags, addr = await self._trio_socket.recvmsg(\n msglen, socket.CMSG_LEN(maxfds * fds.itemsize)\n )\n except BaseException as exc:\n self._convert_socket_error(exc)\n else:\n if not message and not ancdata:\n raise EndOfStream\n\n break\n\n for cmsg_level, cmsg_type, cmsg_data in ancdata:\n if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:\n raise RuntimeError(\n f"Received unexpected ancillary data; message = {message!r}, "\n f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"\n )\n\n fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])\n\n return message, list(fds)\n\n async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:\n if not message:\n raise ValueError("message must not be empty")\n if not fds:\n raise ValueError("fds must not be empty")\n\n filenos: list[int] = []\n for fd in fds:\n if isinstance(fd, int):\n filenos.append(fd)\n elif isinstance(fd, IOBase):\n filenos.append(fd.fileno())\n\n fdarray = array.array("i", filenos)\n await trio.lowlevel.checkpoint()\n with self._send_guard:\n while True:\n try:\n await self._trio_socket.sendmsg(\n [message],\n [\n (\n socket.SOL_SOCKET,\n socket.SCM_RIGHTS,\n fdarray,\n )\n ],\n )\n break\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n\nclass TCPSocketListener(_TrioSocketMixin, abc.SocketListener):\n def __init__(self, raw_socket: socket.socket):\n super().__init__(trio.socket.from_stdlib_socket(raw_socket))\n self._accept_guard = ResourceGuard("accepting connections from")\n\n async def accept(self) -> SocketStream:\n with self._accept_guard:\n try:\n trio_socket, _addr = await self._trio_socket.accept()\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n return SocketStream(trio_socket)\n\n\nclass UNIXSocketListener(_TrioSocketMixin, abc.SocketListener):\n def __init__(self, raw_socket: socket.socket):\n super().__init__(trio.socket.from_stdlib_socket(raw_socket))\n self._accept_guard = ResourceGuard("accepting connections from")\n\n async def accept(self) -> UNIXSocketStream:\n with self._accept_guard:\n try:\n trio_socket, _addr = await self._trio_socket.accept()\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n return UNIXSocketStream(trio_socket)\n\n\nclass UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket):\n def __init__(self, trio_socket: TrioSocketType) -> None:\n super().__init__(trio_socket)\n self._receive_guard = ResourceGuard("reading from")\n self._send_guard = ResourceGuard("writing to")\n\n async def receive(self) -> tuple[bytes, IPSockAddrType]:\n with self._receive_guard:\n try:\n data, addr = await self._trio_socket.recvfrom(65536)\n return data, convert_ipv6_sockaddr(addr)\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n async def send(self, item: UDPPacketType) -> None:\n with self._send_guard:\n try:\n await self._trio_socket.sendto(*item)\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n\nclass ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket):\n def __init__(self, trio_socket: 
TrioSocketType) -> None:\n super().__init__(trio_socket)\n self._receive_guard = ResourceGuard("reading from")\n self._send_guard = ResourceGuard("writing to")\n\n async def receive(self) -> bytes:\n with self._receive_guard:\n try:\n return await self._trio_socket.recv(65536)\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n async def send(self, item: bytes) -> None:\n with self._send_guard:\n try:\n await self._trio_socket.send(item)\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n\nclass UNIXDatagramSocket(_TrioSocketMixin[str], abc.UNIXDatagramSocket):\n def __init__(self, trio_socket: TrioSocketType) -> None:\n super().__init__(trio_socket)\n self._receive_guard = ResourceGuard("reading from")\n self._send_guard = ResourceGuard("writing to")\n\n async def receive(self) -> UNIXDatagramPacketType:\n with self._receive_guard:\n try:\n data, addr = await self._trio_socket.recvfrom(65536)\n return data, addr\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n async def send(self, item: UNIXDatagramPacketType) -> None:\n with self._send_guard:\n try:\n await self._trio_socket.sendto(*item)\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n\nclass ConnectedUNIXDatagramSocket(\n _TrioSocketMixin[str], abc.ConnectedUNIXDatagramSocket\n):\n def __init__(self, trio_socket: TrioSocketType) -> None:\n super().__init__(trio_socket)\n self._receive_guard = ResourceGuard("reading from")\n self._send_guard = ResourceGuard("writing to")\n\n async def receive(self) -> bytes:\n with self._receive_guard:\n try:\n return await self._trio_socket.recv(65536)\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n async def send(self, item: bytes) -> None:\n with self._send_guard:\n try:\n await self._trio_socket.send(item)\n except BaseException as exc:\n self._convert_socket_error(exc)\n\n\n#\n# Synchronization\n#\n\n\nclass Event(BaseEvent):\n def __new__(cls) -> Event:\n return object.__new__(cls)\n\n def __init__(self) -> None:\n self.__original = trio.Event()\n\n def is_set(self) -> bool:\n return self.__original.is_set()\n\n async def wait(self) -> None:\n return await self.__original.wait()\n\n def statistics(self) -> EventStatistics:\n orig_statistics = self.__original.statistics()\n return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting)\n\n def set(self) -> None:\n self.__original.set()\n\n\nclass Lock(BaseLock):\n def __new__(cls, *, fast_acquire: bool = False) -> Lock:\n return object.__new__(cls)\n\n def __init__(self, *, fast_acquire: bool = False) -> None:\n self._fast_acquire = fast_acquire\n self.__original = trio.Lock()\n\n @staticmethod\n def _convert_runtime_error_msg(exc: RuntimeError) -> None:\n if exc.args == ("attempt to re-acquire an already held Lock",):\n exc.args = ("Attempted to acquire an already held Lock",)\n\n async def acquire(self) -> None:\n if not self._fast_acquire:\n try:\n await self.__original.acquire()\n except RuntimeError as exc:\n self._convert_runtime_error_msg(exc)\n raise\n\n return\n\n # This is the "fast path" where we don't let other tasks run\n await trio.lowlevel.checkpoint_if_cancelled()\n try:\n self.__original.acquire_nowait()\n except trio.WouldBlock:\n await self.__original._lot.park()\n except RuntimeError as exc:\n self._convert_runtime_error_msg(exc)\n raise\n\n def acquire_nowait(self) -> None:\n try:\n self.__original.acquire_nowait()\n except trio.WouldBlock:\n raise WouldBlock from None\n except RuntimeError as exc:\n 
self._convert_runtime_error_msg(exc)\n raise\n\n def locked(self) -> bool:\n return self.__original.locked()\n\n def release(self) -> None:\n self.__original.release()\n\n def statistics(self) -> LockStatistics:\n orig_statistics = self.__original.statistics()\n owner = TrioTaskInfo(orig_statistics.owner) if orig_statistics.owner else None\n return LockStatistics(\n orig_statistics.locked, owner, orig_statistics.tasks_waiting\n )\n\n\nclass Semaphore(BaseSemaphore):\n def __new__(\n cls,\n initial_value: int,\n *,\n max_value: int | None = None,\n fast_acquire: bool = False,\n ) -> Semaphore:\n return object.__new__(cls)\n\n def __init__(\n self,\n initial_value: int,\n *,\n max_value: int | None = None,\n fast_acquire: bool = False,\n ) -> None:\n super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire)\n self.__original = trio.Semaphore(initial_value, max_value=max_value)\n\n async def acquire(self) -> None:\n if not self._fast_acquire:\n await self.__original.acquire()\n return\n\n # This is the "fast path" where we don't let other tasks run\n await trio.lowlevel.checkpoint_if_cancelled()\n try:\n self.__original.acquire_nowait()\n except trio.WouldBlock:\n await self.__original._lot.park()\n\n def acquire_nowait(self) -> None:\n try:\n self.__original.acquire_nowait()\n except trio.WouldBlock:\n raise WouldBlock from None\n\n @property\n def max_value(self) -> int | None:\n return self.__original.max_value\n\n @property\n def value(self) -> int:\n return self.__original.value\n\n def release(self) -> None:\n self.__original.release()\n\n def statistics(self) -> SemaphoreStatistics:\n orig_statistics = self.__original.statistics()\n return SemaphoreStatistics(orig_statistics.tasks_waiting)\n\n\nclass CapacityLimiter(BaseCapacityLimiter):\n def __new__(\n cls,\n total_tokens: float | None = None,\n *,\n original: trio.CapacityLimiter | None = None,\n ) -> CapacityLimiter:\n return object.__new__(cls)\n\n def __init__(\n self,\n total_tokens: float | None = None,\n *,\n original: trio.CapacityLimiter | None = None,\n ) -> None:\n if original is not None:\n self.__original = original\n else:\n assert total_tokens is not None\n self.__original = trio.CapacityLimiter(total_tokens)\n\n async def __aenter__(self) -> None:\n return await self.__original.__aenter__()\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> None:\n await self.__original.__aexit__(exc_type, exc_val, exc_tb)\n\n @property\n def total_tokens(self) -> float:\n return self.__original.total_tokens\n\n @total_tokens.setter\n def total_tokens(self, value: float) -> None:\n self.__original.total_tokens = value\n\n @property\n def borrowed_tokens(self) -> int:\n return self.__original.borrowed_tokens\n\n @property\n def available_tokens(self) -> float:\n return self.__original.available_tokens\n\n def acquire_nowait(self) -> None:\n self.__original.acquire_nowait()\n\n def acquire_on_behalf_of_nowait(self, borrower: object) -> None:\n self.__original.acquire_on_behalf_of_nowait(borrower)\n\n async def acquire(self) -> None:\n await self.__original.acquire()\n\n async def acquire_on_behalf_of(self, borrower: object) -> None:\n await self.__original.acquire_on_behalf_of(borrower)\n\n def release(self) -> None:\n return self.__original.release()\n\n def release_on_behalf_of(self, borrower: object) -> None:\n return self.__original.release_on_behalf_of(borrower)\n\n def statistics(self) -> CapacityLimiterStatistics:\n 
orig = self.__original.statistics()\n return CapacityLimiterStatistics(\n borrowed_tokens=orig.borrowed_tokens,\n total_tokens=orig.total_tokens,\n borrowers=tuple(orig.borrowers),\n tasks_waiting=orig.tasks_waiting,\n )\n\n\n_capacity_limiter_wrapper: trio.lowlevel.RunVar = RunVar("_capacity_limiter_wrapper")\n\n\n#\n# Signal handling\n#\n\n\nclass _SignalReceiver:\n _iterator: AsyncIterator[int]\n\n def __init__(self, signals: tuple[Signals, ...]):\n self._signals = signals\n\n def __enter__(self) -> _SignalReceiver:\n self._cm = trio.open_signal_receiver(*self._signals)\n self._iterator = self._cm.__enter__()\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> bool | None:\n return self._cm.__exit__(exc_type, exc_val, exc_tb)\n\n def __aiter__(self) -> _SignalReceiver:\n return self\n\n async def __anext__(self) -> Signals:\n signum = await self._iterator.__anext__()\n return Signals(signum)\n\n\n#\n# Testing and debugging\n#\n\n\nclass TestRunner(abc.TestRunner):\n def __init__(self, **options: Any) -> None:\n from queue import Queue\n\n self._call_queue: Queue[Callable[[], object]] = Queue()\n self._send_stream: MemoryObjectSendStream | None = None\n self._options = options\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: types.TracebackType | None,\n ) -> None:\n if self._send_stream:\n self._send_stream.close()\n while self._send_stream is not None:\n self._call_queue.get()()\n\n async def _run_tests_and_fixtures(self) -> None:\n self._send_stream, receive_stream = create_memory_object_stream(1)\n with receive_stream:\n async for coro, outcome_holder in receive_stream:\n try:\n retval = await coro\n except BaseException as exc:\n outcome_holder.append(Error(exc))\n else:\n outcome_holder.append(Value(retval))\n\n def _main_task_finished(self, outcome: object) -> None:\n self._send_stream = None\n\n def _call_in_runner_task(\n self,\n func: Callable[P, Awaitable[T_Retval]],\n *args: P.args,\n **kwargs: P.kwargs,\n ) -> T_Retval:\n if self._send_stream is None:\n trio.lowlevel.start_guest_run(\n self._run_tests_and_fixtures,\n run_sync_soon_threadsafe=self._call_queue.put,\n done_callback=self._main_task_finished,\n **self._options,\n )\n while self._send_stream is None:\n self._call_queue.get()()\n\n outcome_holder: list[Outcome] = []\n self._send_stream.send_nowait((func(*args, **kwargs), outcome_holder))\n while not outcome_holder:\n self._call_queue.get()()\n\n return outcome_holder[0].unwrap()\n\n def run_asyncgen_fixture(\n self,\n fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],\n kwargs: dict[str, Any],\n ) -> Iterable[T_Retval]:\n asyncgen = fixture_func(**kwargs)\n fixturevalue: T_Retval = self._call_in_runner_task(asyncgen.asend, None)\n\n yield fixturevalue\n\n try:\n self._call_in_runner_task(asyncgen.asend, None)\n except StopAsyncIteration:\n pass\n else:\n self._call_in_runner_task(asyncgen.aclose)\n raise RuntimeError("Async generator fixture did not stop")\n\n def run_fixture(\n self,\n fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],\n kwargs: dict[str, Any],\n ) -> T_Retval:\n return self._call_in_runner_task(fixture_func, **kwargs)\n\n def run_test(\n self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]\n ) -> None:\n self._call_in_runner_task(test_func, **kwargs)\n\n\nclass TrioTaskInfo(TaskInfo):\n def __init__(self, task: trio.lowlevel.Task):\n parent_id 
= None\n if task.parent_nursery and task.parent_nursery.parent_task:\n parent_id = id(task.parent_nursery.parent_task)\n\n super().__init__(id(task), parent_id, task.name, task.coro)\n self._task = weakref.proxy(task)\n\n def has_pending_cancellation(self) -> bool:\n try:\n return self._task._cancel_status.effectively_cancelled\n except ReferenceError:\n # If the task is no longer around, it surely doesn't have a cancellation\n # pending\n return False\n\n\nclass TrioBackend(AsyncBackend):\n @classmethod\n def run(\n cls,\n func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],\n args: tuple[Unpack[PosArgsT]],\n kwargs: dict[str, Any],\n options: dict[str, Any],\n ) -> T_Retval:\n return trio.run(func, *args)\n\n @classmethod\n def current_token(cls) -> object:\n return trio.lowlevel.current_trio_token()\n\n @classmethod\n def current_time(cls) -> float:\n return trio.current_time()\n\n @classmethod\n def cancelled_exception_class(cls) -> type[BaseException]:\n return trio.Cancelled\n\n @classmethod\n async def checkpoint(cls) -> None:\n await trio.lowlevel.checkpoint()\n\n @classmethod\n async def checkpoint_if_cancelled(cls) -> None:\n await trio.lowlevel.checkpoint_if_cancelled()\n\n @classmethod\n async def cancel_shielded_checkpoint(cls) -> None:\n await trio.lowlevel.cancel_shielded_checkpoint()\n\n @classmethod\n async def sleep(cls, delay: float) -> None:\n await trio.sleep(delay)\n\n @classmethod\n def create_cancel_scope(\n cls, *, deadline: float = math.inf, shield: bool = False\n ) -> abc.CancelScope:\n return CancelScope(deadline=deadline, shield=shield)\n\n @classmethod\n def current_effective_deadline(cls) -> float:\n return trio.current_effective_deadline()\n\n @classmethod\n def create_task_group(cls) -> abc.TaskGroup:\n return TaskGroup()\n\n @classmethod\n def create_event(cls) -> abc.Event:\n return Event()\n\n @classmethod\n def create_lock(cls, *, fast_acquire: bool) -> Lock:\n return Lock(fast_acquire=fast_acquire)\n\n @classmethod\n def create_semaphore(\n cls,\n initial_value: int,\n *,\n max_value: int | None = None,\n fast_acquire: bool = False,\n ) -> abc.Semaphore:\n return Semaphore(initial_value, max_value=max_value, fast_acquire=fast_acquire)\n\n @classmethod\n def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:\n return CapacityLimiter(total_tokens)\n\n @classmethod\n async def run_sync_in_worker_thread(\n cls,\n func: Callable[[Unpack[PosArgsT]], T_Retval],\n args: tuple[Unpack[PosArgsT]],\n abandon_on_cancel: bool = False,\n limiter: abc.CapacityLimiter | None = None,\n ) -> T_Retval:\n def wrapper() -> T_Retval:\n with claim_worker_thread(TrioBackend, token):\n return func(*args)\n\n token = TrioBackend.current_token()\n return await run_sync(\n wrapper,\n abandon_on_cancel=abandon_on_cancel,\n limiter=cast(trio.CapacityLimiter, limiter),\n )\n\n @classmethod\n def check_cancelled(cls) -> None:\n trio.from_thread.check_cancelled()\n\n @classmethod\n def run_async_from_thread(\n cls,\n func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],\n args: tuple[Unpack[PosArgsT]],\n token: object,\n ) -> T_Retval:\n return trio.from_thread.run(func, *args)\n\n @classmethod\n def run_sync_from_thread(\n cls,\n func: Callable[[Unpack[PosArgsT]], T_Retval],\n args: tuple[Unpack[PosArgsT]],\n token: object,\n ) -> T_Retval:\n return trio.from_thread.run_sync(func, *args)\n\n @classmethod\n def create_blocking_portal(cls) -> abc.BlockingPortal:\n return BlockingPortal()\n\n @classmethod\n async def open_process(\n cls,\n command: 
StrOrBytesPath | Sequence[StrOrBytesPath],\n *,\n stdin: int | IO[Any] | None,\n stdout: int | IO[Any] | None,\n stderr: int | IO[Any] | None,\n **kwargs: Any,\n ) -> Process:\n def convert_item(item: StrOrBytesPath) -> str:\n str_or_bytes = os.fspath(item)\n if isinstance(str_or_bytes, str):\n return str_or_bytes\n else:\n return os.fsdecode(str_or_bytes)\n\n if isinstance(command, (str, bytes, PathLike)):\n process = await trio.lowlevel.open_process(\n convert_item(command),\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n shell=True,\n **kwargs,\n )\n else:\n process = await trio.lowlevel.open_process(\n [convert_item(item) for item in command],\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n shell=False,\n **kwargs,\n )\n\n stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None\n stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None\n stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None\n return Process(process, stdin_stream, stdout_stream, stderr_stream)\n\n @classmethod\n def setup_process_pool_exit_at_shutdown(cls, workers: set[abc.Process]) -> None:\n trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers)\n\n @classmethod\n async def connect_tcp(\n cls, host: str, port: int, local_address: IPSockAddrType | None = None\n ) -> SocketStream:\n family = socket.AF_INET6 if ":" in host else socket.AF_INET\n trio_socket = trio.socket.socket(family)\n trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n if local_address:\n await trio_socket.bind(local_address)\n\n try:\n await trio_socket.connect((host, port))\n except BaseException:\n trio_socket.close()\n raise\n\n return SocketStream(trio_socket)\n\n @classmethod\n async def connect_unix(cls, path: str | bytes) -> abc.UNIXSocketStream:\n trio_socket = trio.socket.socket(socket.AF_UNIX)\n try:\n await trio_socket.connect(path)\n except BaseException:\n trio_socket.close()\n raise\n\n return UNIXSocketStream(trio_socket)\n\n @classmethod\n def create_tcp_listener(cls, sock: socket.socket) -> abc.SocketListener:\n return TCPSocketListener(sock)\n\n @classmethod\n def create_unix_listener(cls, sock: socket.socket) -> abc.SocketListener:\n return UNIXSocketListener(sock)\n\n @classmethod\n async def create_udp_socket(\n cls,\n family: socket.AddressFamily,\n local_address: IPSockAddrType | None,\n remote_address: IPSockAddrType | None,\n reuse_port: bool,\n ) -> UDPSocket | ConnectedUDPSocket:\n trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM)\n\n if reuse_port:\n trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n\n if local_address:\n await trio_socket.bind(local_address)\n\n if remote_address:\n await trio_socket.connect(remote_address)\n return ConnectedUDPSocket(trio_socket)\n else:\n return UDPSocket(trio_socket)\n\n @classmethod\n @overload\n async def create_unix_datagram_socket(\n cls, raw_socket: socket.socket, remote_path: None\n ) -> abc.UNIXDatagramSocket: ...\n\n @classmethod\n @overload\n async def create_unix_datagram_socket(\n cls, raw_socket: socket.socket, remote_path: str | bytes\n ) -> abc.ConnectedUNIXDatagramSocket: ...\n\n @classmethod\n async def create_unix_datagram_socket(\n cls, raw_socket: socket.socket, remote_path: str | bytes | None\n ) -> abc.UNIXDatagramSocket | abc.ConnectedUNIXDatagramSocket:\n trio_socket = trio.socket.from_stdlib_socket(raw_socket)\n\n if remote_path:\n await trio_socket.connect(remote_path)\n return ConnectedUNIXDatagramSocket(trio_socket)\n 
else:\n return UNIXDatagramSocket(trio_socket)\n\n @classmethod\n async def getaddrinfo(\n cls,\n host: bytes | str | None,\n port: str | int | None,\n *,\n family: int | AddressFamily = 0,\n type: int | SocketKind = 0,\n proto: int = 0,\n flags: int = 0,\n ) -> Sequence[\n tuple[\n AddressFamily,\n SocketKind,\n int,\n str,\n tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes],\n ]\n ]:\n return await trio.socket.getaddrinfo(host, port, family, type, proto, flags)\n\n @classmethod\n async def getnameinfo(\n cls, sockaddr: IPSockAddrType, flags: int = 0\n ) -> tuple[str, str]:\n return await trio.socket.getnameinfo(sockaddr, flags)\n\n @classmethod\n async def wait_readable(cls, obj: HasFileno | int) -> None:\n try:\n await wait_readable(obj)\n except trio.ClosedResourceError as exc:\n raise ClosedResourceError().with_traceback(exc.__traceback__) from None\n except trio.BusyResourceError:\n raise BusyResourceError("reading from") from None\n\n @classmethod\n async def wait_writable(cls, obj: HasFileno | int) -> None:\n try:\n await wait_writable(obj)\n except trio.ClosedResourceError as exc:\n raise ClosedResourceError().with_traceback(exc.__traceback__) from None\n except trio.BusyResourceError:\n raise BusyResourceError("writing to") from None\n\n @classmethod\n def current_default_thread_limiter(cls) -> CapacityLimiter:\n try:\n return _capacity_limiter_wrapper.get()\n except LookupError:\n limiter = CapacityLimiter(\n original=trio.to_thread.current_default_thread_limiter()\n )\n _capacity_limiter_wrapper.set(limiter)\n return limiter\n\n @classmethod\n def open_signal_receiver(\n cls, *signals: Signals\n ) -> AbstractContextManager[AsyncIterator[Signals]]:\n return _SignalReceiver(signals)\n\n @classmethod\n def get_current_task(cls) -> TaskInfo:\n task = current_task()\n return TrioTaskInfo(task)\n\n @classmethod\n def get_running_tasks(cls) -> Sequence[TaskInfo]:\n root_task = current_root_task()\n assert root_task\n task_infos = [TrioTaskInfo(root_task)]\n nurseries = root_task.child_nurseries\n while nurseries:\n new_nurseries: list[trio.Nursery] = []\n for nursery in nurseries:\n for task in nursery.child_tasks:\n task_infos.append(TrioTaskInfo(task))\n new_nurseries.extend(task.child_nurseries)\n\n nurseries = new_nurseries\n\n return task_infos\n\n @classmethod\n async def wait_all_tasks_blocked(cls) -> None:\n from trio.testing import wait_all_tasks_blocked\n\n await wait_all_tasks_blocked()\n\n @classmethod\n def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:\n return TestRunner(**options)\n\n\nbackend_class = TrioBackend\n
|
.venv\Lib\site-packages\anyio\_backends\_trio.py
|
_trio.py
|
Python
| 40,429 | 0.95 | 0.201649 | 0.042593 |
python-kit
| 797 |
2024-07-04T10:39:07.879011
|
Apache-2.0
| false |
1157b6a2ae73078db97ca4b8144e17de
|
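A minimal usage sketch for the backend above, assuming anyio and trio are installed: `anyio.run(..., backend="trio")` is what routes through `TrioBackend`, and the worker-thread round trip exercises `run_sync_in_worker_thread()` and `run_sync_from_thread()`:

import anyio
import anyio.from_thread
import anyio.to_thread

def blocking_work() -> str:
    # Runs in a worker thread; from_thread.run_sync() calls back into the
    # event loop through the backend's run_sync_from_thread().
    return anyio.from_thread.run_sync(lambda: "hello from the event loop")

async def main() -> None:
    # to_thread.run_sync() dispatches to TrioBackend.run_sync_in_worker_thread()
    result = await anyio.to_thread.run_sync(blocking_work)
    print(result)

anyio.run(main, backend="trio")  # loads anyio._backends._trio.backend_class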
from __future__ import annotations\n\nimport asyncio\nimport socket\nimport threading\nfrom collections.abc import Callable\nfrom selectors import EVENT_READ, EVENT_WRITE, DefaultSelector\nfrom typing import TYPE_CHECKING, Any\n\nif TYPE_CHECKING:\n from _typeshed import FileDescriptorLike\n\n_selector_lock = threading.Lock()\n_selector: Selector | None = None\n\n\nclass Selector:\n def __init__(self) -> None:\n self._thread = threading.Thread(target=self.run, name="AnyIO socket selector")\n self._selector = DefaultSelector()\n self._send, self._receive = socket.socketpair()\n self._send.setblocking(False)\n self._receive.setblocking(False)\n # This somewhat reduces the amount of memory wasted queueing up data\n # for wakeups. With these settings, maximum number of 1-byte sends\n # before getting BlockingIOError:\n # Linux 4.8: 6\n # macOS (darwin 15.5): 1\n # Windows 10: 525347\n # Windows you're weird. (And on Windows setting SNDBUF to 0 makes send\n # blocking, even on non-blocking sockets, so don't do that.)\n self._receive.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)\n self._send.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)\n # On Windows this is a TCP socket so this might matter. On other\n # platforms this fails b/c AF_UNIX sockets aren't actually TCP.\n try:\n self._send.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n except OSError:\n pass\n\n self._selector.register(self._receive, EVENT_READ)\n self._closed = False\n\n def start(self) -> None:\n self._thread.start()\n threading._register_atexit(self._stop) # type: ignore[attr-defined]\n\n def _stop(self) -> None:\n global _selector\n self._closed = True\n self._notify_self()\n self._send.close()\n self._thread.join()\n self._selector.unregister(self._receive)\n self._receive.close()\n self._selector.close()\n _selector = None\n assert not self._selector.get_map(), (\n "selector still has registered file descriptors after shutdown"\n )\n\n def _notify_self(self) -> None:\n try:\n self._send.send(b"\x00")\n except BlockingIOError:\n pass\n\n def add_reader(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:\n loop = asyncio.get_running_loop()\n try:\n key = self._selector.get_key(fd)\n except KeyError:\n self._selector.register(fd, EVENT_READ, {EVENT_READ: (loop, callback)})\n else:\n if EVENT_READ in key.data:\n raise ValueError(\n "this file descriptor is already registered for reading"\n )\n\n key.data[EVENT_READ] = loop, callback\n self._selector.modify(fd, key.events | EVENT_READ, key.data)\n\n self._notify_self()\n\n def add_writer(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:\n loop = asyncio.get_running_loop()\n try:\n key = self._selector.get_key(fd)\n except KeyError:\n self._selector.register(fd, EVENT_WRITE, {EVENT_WRITE: (loop, callback)})\n else:\n if EVENT_WRITE in key.data:\n raise ValueError(\n "this file descriptor is already registered for writing"\n )\n\n key.data[EVENT_WRITE] = loop, callback\n self._selector.modify(fd, key.events | EVENT_WRITE, key.data)\n\n self._notify_self()\n\n def remove_reader(self, fd: FileDescriptorLike) -> bool:\n try:\n key = self._selector.get_key(fd)\n except KeyError:\n return False\n\n if new_events := key.events ^ EVENT_READ:\n del key.data[EVENT_READ]\n self._selector.modify(fd, new_events, key.data)\n else:\n self._selector.unregister(fd)\n\n return True\n\n def remove_writer(self, fd: FileDescriptorLike) -> bool:\n try:\n key = self._selector.get_key(fd)\n except KeyError:\n return False\n\n if new_events := key.events 
^ EVENT_WRITE:\n del key.data[EVENT_WRITE]\n self._selector.modify(fd, new_events, key.data)\n else:\n self._selector.unregister(fd)\n\n return True\n\n def run(self) -> None:\n while not self._closed:\n for key, events in self._selector.select():\n if key.fileobj is self._receive:\n try:\n while self._receive.recv(4096):\n pass\n except BlockingIOError:\n pass\n\n continue\n\n if events & EVENT_READ:\n loop, callback = key.data[EVENT_READ]\n self.remove_reader(key.fd)\n try:\n loop.call_soon_threadsafe(callback)\n except RuntimeError:\n pass # the loop was already closed\n\n if events & EVENT_WRITE:\n loop, callback = key.data[EVENT_WRITE]\n self.remove_writer(key.fd)\n try:\n loop.call_soon_threadsafe(callback)\n except RuntimeError:\n pass # the loop was already closed\n\n\ndef get_selector() -> Selector:\n global _selector\n\n with _selector_lock:\n if _selector is None:\n _selector = Selector()\n _selector.start()\n\n return _selector\n
|
.venv\Lib\site-packages\anyio\_core\_asyncio_selector_thread.py
|
_asyncio_selector_thread.py
|
Python
| 5,626 | 0.95 | 0.209581 | 0.072464 |
vue-tools
| 233 |
2023-12-16T03:22:56.676128
|
BSD-3-Clause
| false |
f1f3ced25c061fbb15a0f70048ef7e21
|
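The wakeup mechanism above is the classic self-socketpair trick; a standalone sketch (stdlib only, not anyio's API) showing how a write from any thread interrupts a blocking select():

import socket
from selectors import DefaultSelector, EVENT_READ

selector = DefaultSelector()
send_sock, receive_sock = socket.socketpair()
send_sock.setblocking(False)
receive_sock.setblocking(False)
selector.register(receive_sock, EVENT_READ)

def notify() -> None:
    # Safe to call from any thread; once the send buffer is full, extra
    # wakeups raise BlockingIOError, and one pending byte is enough.
    try:
        send_sock.send(b"\x00")
    except BlockingIOError:
        pass

notify()
ready = selector.select(timeout=1)
assert any(key.fileobj is receive_sock for key, _ in ready)
receive_sock.recv(4096)  # drain the wakeup byte, as Selector.run() does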
from __future__ import annotations\n\nimport math\nimport sys\nimport threading\nfrom collections.abc import Awaitable, Callable, Generator\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom typing import TYPE_CHECKING, Any, TypeVar\n\nimport sniffio\n\nif sys.version_info >= (3, 11):\n from typing import TypeVarTuple, Unpack\nelse:\n from typing_extensions import TypeVarTuple, Unpack\n\nif TYPE_CHECKING:\n from ..abc import AsyncBackend\n\n# This must be updated when new backends are introduced\nBACKENDS = "asyncio", "trio"\n\nT_Retval = TypeVar("T_Retval")\nPosArgsT = TypeVarTuple("PosArgsT")\n\nthreadlocals = threading.local()\nloaded_backends: dict[str, type[AsyncBackend]] = {}\n\n\ndef run(\n func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],\n *args: Unpack[PosArgsT],\n backend: str = "asyncio",\n backend_options: dict[str, Any] | None = None,\n) -> T_Retval:\n """\n Run the given coroutine function in an asynchronous event loop.\n\n The current thread must not be already running an event loop.\n\n :param func: a coroutine function\n :param args: positional arguments to ``func``\n :param backend: name of the asynchronous event loop implementation – currently\n either ``asyncio`` or ``trio``\n :param backend_options: keyword arguments to call the backend ``run()``\n implementation with (documented :ref:`here <backend options>`)\n :return: the return value of the coroutine function\n :raises RuntimeError: if an asynchronous event loop is already running in this\n thread\n :raises LookupError: if the named backend is not found\n\n """\n try:\n asynclib_name = sniffio.current_async_library()\n except sniffio.AsyncLibraryNotFoundError:\n pass\n else:\n raise RuntimeError(f"Already running {asynclib_name} in this thread")\n\n try:\n async_backend = get_async_backend(backend)\n except ImportError as exc:\n raise LookupError(f"No such backend: {backend}") from exc\n\n token = None\n if sniffio.current_async_library_cvar.get(None) is None:\n # Since we're in control of the event loop, we can cache the name of the async\n # library\n token = sniffio.current_async_library_cvar.set(backend)\n\n try:\n backend_options = backend_options or {}\n return async_backend.run(func, args, {}, backend_options)\n finally:\n if token:\n sniffio.current_async_library_cvar.reset(token)\n\n\nasync def sleep(delay: float) -> None:\n """\n Pause the current task for the specified duration.\n\n :param delay: the duration, in seconds\n\n """\n return await get_async_backend().sleep(delay)\n\n\nasync def sleep_forever() -> None:\n """\n Pause the current task until it's cancelled.\n\n This is a shortcut for ``sleep(math.inf)``.\n\n .. versionadded:: 3.1\n\n """\n await sleep(math.inf)\n\n\nasync def sleep_until(deadline: float) -> None:\n """\n Pause the current task until the given time.\n\n :param deadline: the absolute time to wake up at (according to the internal\n monotonic clock of the event loop)\n\n .. 
versionadded:: 3.1\n\n """\n now = current_time()\n await sleep(max(deadline - now, 0))\n\n\ndef current_time() -> float:\n """\n Return the current value of the event loop's internal clock.\n\n :return: the clock value (seconds)\n\n """\n return get_async_backend().current_time()\n\n\ndef get_all_backends() -> tuple[str, ...]:\n """Return a tuple of the names of all built-in backends."""\n return BACKENDS\n\n\ndef get_cancelled_exc_class() -> type[BaseException]:\n """Return the current async library's cancellation exception class."""\n return get_async_backend().cancelled_exception_class()\n\n\n#\n# Private API\n#\n\n\n@contextmanager\ndef claim_worker_thread(\n backend_class: type[AsyncBackend], token: object\n) -> Generator[Any, None, None]:\n threadlocals.current_async_backend = backend_class\n threadlocals.current_token = token\n try:\n yield\n finally:\n del threadlocals.current_async_backend\n del threadlocals.current_token\n\n\ndef get_async_backend(asynclib_name: str | None = None) -> type[AsyncBackend]:\n if asynclib_name is None:\n asynclib_name = sniffio.current_async_library()\n\n # We use our own dict instead of sys.modules to get the already imported back-end\n # class because the appropriate modules in sys.modules could potentially be only\n # partially initialized\n try:\n return loaded_backends[asynclib_name]\n except KeyError:\n module = import_module(f"anyio._backends._{asynclib_name}")\n loaded_backends[asynclib_name] = module.backend_class\n return module.backend_class\n
|
.venv\Lib\site-packages\anyio\_core\_eventloop.py
|
_eventloop.py
|
Python
| 4,695 | 0.95 | 0.168675 | 0.081967 |
python-kit
| 829 |
2025-03-29T16:29:39.369571
|
GPL-3.0
| false |
7235a9fcd37e87fe8f159a5a0b291fbb
|
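A usage sketch of the dispatch above, assuming anyio (and trio, for the second call) are installed; `backend_options` are forwarded to the chosen backend's `run()`:

import anyio

async def main() -> str:
    return "done"

print(anyio.run(main))                  # defaults to the asyncio backend
print(anyio.run(main, backend="trio"))  # imports anyio._backends._trio on demand
# Backend-specific options are passed through, e.g. for asyncio:
print(anyio.run(main, backend="asyncio", backend_options={"use_uvloop": False}))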
from __future__ import annotations\n\nimport sys\nfrom collections.abc import Generator\nfrom textwrap import dedent\nfrom typing import Any\n\nif sys.version_info < (3, 11):\n from exceptiongroup import BaseExceptionGroup\n\n\nclass BrokenResourceError(Exception):\n """\n Raised when trying to use a resource that has been rendered unusable due to external\n causes (e.g. a send stream whose peer has disconnected).\n """\n\n\nclass BrokenWorkerProcess(Exception):\n """\n Raised by :meth:`~anyio.to_process.run_sync` if the worker process terminates abruptly or\n otherwise misbehaves.\n """\n\n\nclass BrokenWorkerIntepreter(Exception):\n """\n Raised by :meth:`~anyio.to_interpreter.run_sync` if an unexpected exception is\n raised in the subinterpreter.\n """\n\n def __init__(self, excinfo: Any):\n # This was adapted from concurrent.futures.interpreter.ExecutionFailed\n msg = excinfo.formatted\n if not msg:\n if excinfo.type and excinfo.msg:\n msg = f"{excinfo.type.__name__}: {excinfo.msg}"\n else:\n msg = excinfo.type.__name__ or excinfo.msg\n\n super().__init__(msg)\n self.excinfo = excinfo\n\n def __str__(self) -> str:\n try:\n formatted = self.excinfo.errdisplay\n except Exception:\n return super().__str__()\n else:\n return dedent(\n f"""\n {super().__str__()}\n\n Uncaught in the interpreter:\n\n {formatted}\n """.strip()\n )\n\n\nclass BusyResourceError(Exception):\n """\n Raised when two tasks are trying to read from or write to the same resource\n concurrently.\n """\n\n def __init__(self, action: str):\n super().__init__(f"Another task is already {action} this resource")\n\n\nclass ClosedResourceError(Exception):\n """Raised when trying to use a resource that has been closed."""\n\n\nclass DelimiterNotFound(Exception):\n """\n Raised during\n :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the\n maximum number of bytes has been read without the delimiter being found.\n """\n\n def __init__(self, max_bytes: int) -> None:\n super().__init__(\n f"The delimiter was not found among the first {max_bytes} bytes"\n )\n\n\nclass EndOfStream(Exception):\n """\n Raised when trying to read from a stream that has been closed from the other end.\n """\n\n\nclass IncompleteRead(Exception):\n """\n Raised during\n :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or\n :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the\n connection is closed before the requested amount of bytes has been read.\n """\n\n def __init__(self) -> None:\n super().__init__(\n "The stream was closed before the read operation could be completed"\n )\n\n\nclass TypedAttributeLookupError(LookupError):\n """\n Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute\n is not found and no default value has been given.\n """\n\n\nclass WouldBlock(Exception):\n """Raised by ``X_nowait`` functions if ``X()`` would block."""\n\n\ndef iterate_exceptions(\n exception: BaseException,\n) -> Generator[BaseException, None, None]:\n if isinstance(exception, BaseExceptionGroup):\n for exc in exception.exceptions:\n yield from iterate_exceptions(exc)\n else:\n yield exception\n
|
.venv\Lib\site-packages\anyio\_core\_exceptions.py
|
_exceptions.py
|
Python
| 3,503 | 0.95 | 0.214286 | 0.010638 |
awesome-app
| 643 |
2025-05-30T09:48:29.622838
|
BSD-3-Clause
| false |
675efff2c3b964e8d3db940924fb96e6
|
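A small sketch of what `iterate_exceptions()` yields, reimplemented inline so it runs standalone (Python 3.11+ for the built-in `BaseExceptionGroup`):

from collections.abc import Generator

def iterate_exceptions(exc: BaseException) -> Generator[BaseException, None, None]:
    # Mirrors the helper above: depth-first flattening of exception groups.
    if isinstance(exc, BaseExceptionGroup):
        for sub in exc.exceptions:
            yield from iterate_exceptions(sub)
    else:
        yield exc

group = BaseExceptionGroup(
    "outer", [ValueError("a"), BaseExceptionGroup("inner", [KeyError("b")])]
)
print([type(e).__name__ for e in iterate_exceptions(group)])
# ['ValueError', 'KeyError']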
from __future__ import annotations\n\nfrom ..abc import AsyncResource\nfrom ._tasks import CancelScope\n\n\nasync def aclose_forcefully(resource: AsyncResource) -> None:\n """\n Close an asynchronous resource in a cancelled scope.\n\n Doing this closes the resource without waiting on anything.\n\n :param resource: the resource to close\n\n """\n with CancelScope() as scope:\n scope.cancel()\n await resource.aclose()\n
|
.venv\Lib\site-packages\anyio\_core\_resources.py
|
_resources.py
|
Python
| 435 | 0.85 | 0.055556 | 0 |
python-kit
| 662 |
2025-02-09T04:25:05.377658
|
Apache-2.0
| false |
c3f7a31ae1075340ff21ad4908de13f1
|
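A hedged usage sketch for `aclose_forcefully()`; the endpoint is illustrative only, and any anyio AsyncResource works in place of the TCP stream:

import anyio
from anyio import aclose_forcefully, connect_tcp

async def main() -> None:
    stream = await connect_tcp("example.com", 80)  # illustrative endpoint
    try:
        await stream.send(b"GET / HTTP/1.0\r\n\r\n")
    finally:
        # Close without waiting for a graceful shutdown: aclose() runs
        # inside an already-cancelled scope, as shown above.
        await aclose_forcefully(stream)

anyio.run(main)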
from __future__ import annotations\n\nfrom collections.abc import AsyncIterator\nfrom contextlib import AbstractContextManager\nfrom signal import Signals\n\nfrom ._eventloop import get_async_backend\n\n\ndef open_signal_receiver(\n *signals: Signals,\n) -> AbstractContextManager[AsyncIterator[Signals]]:\n """\n Start receiving operating system signals.\n\n :param signals: signals to receive (e.g. ``signal.SIGINT``)\n :return: an asynchronous context manager for an asynchronous iterator which yields\n signal numbers\n\n .. warning:: Windows does not support signals natively so it is best to avoid\n relying on this in cross-platform applications.\n\n .. warning:: On asyncio, this permanently replaces any previous signal handler for\n the given signals, as set via :meth:`~asyncio.loop.add_signal_handler`.\n\n """\n return get_async_backend().open_signal_receiver(*signals)\n
|
.venv\Lib\site-packages\anyio\_core\_signals.py
|
_signals.py
|
Python
| 905 | 0.85 | 0.111111 | 0.052632 |
awesome-app
| 337 |
2023-09-02T12:02:28.116975
|
BSD-3-Clause
| false |
39f2f2686da900a7525f06c1790604dc
|
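Usage sketch, adapted from the anyio documentation (POSIX only; note the synchronous `with` wrapping an async iterator):

import signal

import anyio
from anyio import open_signal_receiver

async def main() -> None:
    with open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
        async for signum in signals:
            print(f"received {signum!r}, shutting down")
            return

anyio.run(main)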
from __future__ import annotations\n\nimport math\nfrom typing import TypeVar\nfrom warnings import warn\n\nfrom ..streams.memory import (\n MemoryObjectReceiveStream,\n MemoryObjectSendStream,\n MemoryObjectStreamState,\n)\n\nT_Item = TypeVar("T_Item")\n\n\nclass create_memory_object_stream(\n tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]],\n):\n """\n Create a memory object stream.\n\n The stream's item type can be annotated like\n :func:`create_memory_object_stream[T_Item]`.\n\n :param max_buffer_size: number of items held in the buffer until ``send()`` starts\n blocking\n :param item_type: old way of marking the streams with the right generic type for\n static typing (does nothing on AnyIO 4)\n\n .. deprecated:: 4.0\n Use ``create_memory_object_stream[YourItemType](...)`` instead.\n :return: a tuple of (send stream, receive stream)\n\n """\n\n def __new__( # type: ignore[misc]\n cls, max_buffer_size: float = 0, item_type: object = None\n ) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:\n if max_buffer_size != math.inf and not isinstance(max_buffer_size, int):\n raise ValueError("max_buffer_size must be either an integer or math.inf")\n if max_buffer_size < 0:\n raise ValueError("max_buffer_size cannot be negative")\n if item_type is not None:\n warn(\n "The item_type argument has been deprecated in AnyIO 4.0. "\n "Use create_memory_object_stream[YourItemType](...) instead.",\n DeprecationWarning,\n stacklevel=2,\n )\n\n state = MemoryObjectStreamState[T_Item](max_buffer_size)\n return (MemoryObjectSendStream(state), MemoryObjectReceiveStream(state))\n
|
.venv\Lib\site-packages\anyio\_core\_streams.py
|
_streams.py
|
Python
| 1,804 | 0.95 | 0.115385 | 0 |
awesome-app
| 55 |
2025-01-20T21:28:11.587629
|
Apache-2.0
| false |
4c35b4ad7bfc94113376cb6d24da67f0
|
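Usage sketch of the subscripted constructor (the AnyIO 4 style the docstring recommends), with both stream halves closed by their async context managers:

import anyio
from anyio import create_memory_object_stream, create_task_group

async def main() -> None:
    send, receive = create_memory_object_stream[int](max_buffer_size=10)
    async with create_task_group() as tg, send, receive:
        tg.start_soon(send.send, 42)
        print(await receive.receive())  # 42

anyio.run(main)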
from __future__ import annotations\n\nimport sys\nfrom collections.abc import AsyncIterable, Iterable, Mapping, Sequence\nfrom io import BytesIO\nfrom os import PathLike\nfrom subprocess import PIPE, CalledProcessError, CompletedProcess\nfrom typing import IO, Any, Union, cast\n\nfrom ..abc import Process\nfrom ._eventloop import get_async_backend\nfrom ._tasks import create_task_group\n\nif sys.version_info >= (3, 10):\n from typing import TypeAlias\nelse:\n from typing_extensions import TypeAlias\n\nStrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]\n\n\nasync def run_process(\n command: StrOrBytesPath | Sequence[StrOrBytesPath],\n *,\n input: bytes | None = None,\n stdin: int | IO[Any] | None = None,\n stdout: int | IO[Any] | None = PIPE,\n stderr: int | IO[Any] | None = PIPE,\n check: bool = True,\n cwd: StrOrBytesPath | None = None,\n env: Mapping[str, str] | None = None,\n startupinfo: Any = None,\n creationflags: int = 0,\n start_new_session: bool = False,\n pass_fds: Sequence[int] = (),\n user: str | int | None = None,\n group: str | int | None = None,\n extra_groups: Iterable[str | int] | None = None,\n umask: int = -1,\n) -> CompletedProcess[bytes]:\n """\n Run an external command in a subprocess and wait until it completes.\n\n .. seealso:: :func:`subprocess.run`\n\n :param command: either a string to pass to the shell, or an iterable of strings\n containing the executable name or path and its arguments\n :param input: bytes passed to the standard input of the subprocess\n :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,\n a file-like object, or `None`; ``input`` overrides this\n :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,\n a file-like object, or `None`\n :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,\n :data:`subprocess.STDOUT`, a file-like object, or `None`\n :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the\n process terminates with a return code other than 0\n :param cwd: If not ``None``, change the working directory to this before running the\n command\n :param env: if not ``None``, this mapping replaces the inherited environment\n variables from the parent process\n :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used\n to specify process startup parameters (Windows only)\n :param creationflags: flags that can be used to control the creation of the\n subprocess (see :class:`subprocess.Popen` for the specifics)\n :param start_new_session: if ``true`` the setsid() system call will be made in the\n child process prior to the execution of the subprocess. (POSIX only)\n :param pass_fds: sequence of file descriptors to keep open between the parent and\n child processes. 
(POSIX only)\n :param user: effective user to run the process as (Python >= 3.9, POSIX only)\n :param group: effective group to run the process as (Python >= 3.9, POSIX only)\n :param extra_groups: supplementary groups to set in the subprocess (Python >= 3.9,\n POSIX only)\n :param umask: if not negative, this umask is applied in the child process before\n running the given command (Python >= 3.9, POSIX only)\n :return: an object representing the completed process\n :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process\n exits with a nonzero return code\n\n """\n\n async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:\n buffer = BytesIO()\n async for chunk in stream:\n buffer.write(chunk)\n\n stream_contents[index] = buffer.getvalue()\n\n if stdin is not None and input is not None:\n raise ValueError("only one of stdin and input is allowed")\n\n async with await open_process(\n command,\n stdin=PIPE if input else stdin,\n stdout=stdout,\n stderr=stderr,\n cwd=cwd,\n env=env,\n startupinfo=startupinfo,\n creationflags=creationflags,\n start_new_session=start_new_session,\n pass_fds=pass_fds,\n user=user,\n group=group,\n extra_groups=extra_groups,\n umask=umask,\n ) as process:\n stream_contents: list[bytes | None] = [None, None]\n async with create_task_group() as tg:\n if process.stdout:\n tg.start_soon(drain_stream, process.stdout, 0)\n\n if process.stderr:\n tg.start_soon(drain_stream, process.stderr, 1)\n\n if process.stdin and input:\n await process.stdin.send(input)\n await process.stdin.aclose()\n\n await process.wait()\n\n output, errors = stream_contents\n if check and process.returncode != 0:\n raise CalledProcessError(cast(int, process.returncode), command, output, errors)\n\n return CompletedProcess(command, cast(int, process.returncode), output, errors)\n\n\nasync def open_process(\n command: StrOrBytesPath | Sequence[StrOrBytesPath],\n *,\n stdin: int | IO[Any] | None = PIPE,\n stdout: int | IO[Any] | None = PIPE,\n stderr: int | IO[Any] | None = PIPE,\n cwd: StrOrBytesPath | None = None,\n env: Mapping[str, str] | None = None,\n startupinfo: Any = None,\n creationflags: int = 0,\n start_new_session: bool = False,\n pass_fds: Sequence[int] = (),\n user: str | int | None = None,\n group: str | int | None = None,\n extra_groups: Iterable[str | int] | None = None,\n umask: int = -1,\n) -> Process:\n """\n Start an external command in a subprocess.\n\n .. 
seealso:: :class:`subprocess.Popen`\n\n :param command: either a string to pass to the shell, or an iterable of strings\n containing the executable name or path and its arguments\n :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a\n file-like object, or ``None``\n :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,\n a file-like object, or ``None``\n :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,\n :data:`subprocess.STDOUT`, a file-like object, or ``None``\n :param cwd: If not ``None``, the working directory is changed before executing\n :param env: If env is not ``None``, it must be a mapping that defines the\n environment variables for the new process\n :param creationflags: flags that can be used to control the creation of the\n subprocess (see :class:`subprocess.Popen` for the specifics)\n :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used\n to specify process startup parameters (Windows only)\n :param start_new_session: if ``true`` the setsid() system call will be made in the\n child process prior to the execution of the subprocess. (POSIX only)\n :param pass_fds: sequence of file descriptors to keep open between the parent and\n child processes. (POSIX only)\n :param user: effective user to run the process as (POSIX only)\n :param group: effective group to run the process as (POSIX only)\n :param extra_groups: supplementary groups to set in the subprocess (POSIX only)\n :param umask: if not negative, this umask is applied in the child process before\n running the given command (POSIX only)\n :return: an asynchronous process object\n\n """\n kwargs: dict[str, Any] = {}\n if user is not None:\n kwargs["user"] = user\n\n if group is not None:\n kwargs["group"] = group\n\n if extra_groups is not None:\n kwargs["extra_groups"] = extra_groups\n\n if umask >= 0:\n kwargs["umask"] = umask\n\n return await get_async_backend().open_process(\n command,\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n cwd=cwd,\n env=env,\n startupinfo=startupinfo,\n creationflags=creationflags,\n start_new_session=start_new_session,\n pass_fds=pass_fds,\n **kwargs,\n )\n
|
.venv\Lib\site-packages\anyio\_core\_subprocesses.py
|
_subprocesses.py
|
Python
| 8,047 | 0.85 | 0.153465 | 0.017143 |
python-kit
| 757 |
2024-08-25T04:23:19.841222
|
BSD-3-Clause
| false |
dc98ba21bd805bc0bccc0e2e69c6ca80
|
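Usage sketch (assumes a POSIX system with `echo` on PATH); `run_process()` resolves to a `subprocess.CompletedProcess` with bytes output:

import anyio
from anyio import run_process

async def main() -> None:
    result = await run_process(["echo", "hello"])
    print(result.returncode)       # 0
    print(result.stdout.decode())  # 'hello\n'

anyio.run(main)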
from __future__ import annotations\n\nimport math\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom types import TracebackType\n\nfrom sniffio import AsyncLibraryNotFoundError\n\nfrom ..lowlevel import checkpoint\nfrom ._eventloop import get_async_backend\nfrom ._exceptions import BusyResourceError\nfrom ._tasks import CancelScope\nfrom ._testing import TaskInfo, get_current_task\n\n\n@dataclass(frozen=True)\nclass EventStatistics:\n """\n :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`\n """\n\n tasks_waiting: int\n\n\n@dataclass(frozen=True)\nclass CapacityLimiterStatistics:\n """\n :ivar int borrowed_tokens: number of tokens currently borrowed by tasks\n :ivar float total_tokens: total number of available tokens\n :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from\n this limiter\n :ivar int tasks_waiting: number of tasks waiting on\n :meth:`~.CapacityLimiter.acquire` or\n :meth:`~.CapacityLimiter.acquire_on_behalf_of`\n """\n\n borrowed_tokens: int\n total_tokens: float\n borrowers: tuple[object, ...]\n tasks_waiting: int\n\n\n@dataclass(frozen=True)\nclass LockStatistics:\n """\n :ivar bool locked: flag indicating if this lock is locked or not\n :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the\n lock is not held by any task)\n :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`\n """\n\n locked: bool\n owner: TaskInfo | None\n tasks_waiting: int\n\n\n@dataclass(frozen=True)\nclass ConditionStatistics:\n """\n :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`\n :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying\n :class:`~.Lock`\n """\n\n tasks_waiting: int\n lock_statistics: LockStatistics\n\n\n@dataclass(frozen=True)\nclass SemaphoreStatistics:\n """\n :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`\n\n """\n\n tasks_waiting: int\n\n\nclass Event:\n def __new__(cls) -> Event:\n try:\n return get_async_backend().create_event()\n except AsyncLibraryNotFoundError:\n return EventAdapter()\n\n def set(self) -> None:\n """Set the flag, notifying all listeners."""\n raise NotImplementedError\n\n def is_set(self) -> bool:\n """Return ``True`` if the flag is set, ``False`` if not."""\n raise NotImplementedError\n\n async def wait(self) -> None:\n """\n Wait until the flag has been set.\n\n If the flag has already been set when this method is called, it returns\n immediately.\n\n """\n raise NotImplementedError\n\n def statistics(self) -> EventStatistics:\n """Return statistics about the current state of this event."""\n raise NotImplementedError\n\n\nclass EventAdapter(Event):\n _internal_event: Event | None = None\n _is_set: bool = False\n\n def __new__(cls) -> EventAdapter:\n return object.__new__(cls)\n\n @property\n def _event(self) -> Event:\n if self._internal_event is None:\n self._internal_event = get_async_backend().create_event()\n if self._is_set:\n self._internal_event.set()\n\n return self._internal_event\n\n def set(self) -> None:\n if self._internal_event is None:\n self._is_set = True\n else:\n self._event.set()\n\n def is_set(self) -> bool:\n if self._internal_event is None:\n return self._is_set\n\n return self._internal_event.is_set()\n\n async def wait(self) -> None:\n await self._event.wait()\n\n def statistics(self) -> EventStatistics:\n if self._internal_event is None:\n return EventStatistics(tasks_waiting=0)\n\n return 
self._internal_event.statistics()\n\n\nclass Lock:\n def __new__(cls, *, fast_acquire: bool = False) -> Lock:\n try:\n return get_async_backend().create_lock(fast_acquire=fast_acquire)\n except AsyncLibraryNotFoundError:\n return LockAdapter(fast_acquire=fast_acquire)\n\n async def __aenter__(self) -> None:\n await self.acquire()\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> None:\n self.release()\n\n async def acquire(self) -> None:\n """Acquire the lock."""\n raise NotImplementedError\n\n def acquire_nowait(self) -> None:\n """\n Acquire the lock, without blocking.\n\n :raises ~anyio.WouldBlock: if the operation would block\n\n """\n raise NotImplementedError\n\n def release(self) -> None:\n """Release the lock."""\n raise NotImplementedError\n\n def locked(self) -> bool:\n """Return True if the lock is currently held."""\n raise NotImplementedError\n\n def statistics(self) -> LockStatistics:\n """\n Return statistics about the current state of this lock.\n\n .. versionadded:: 3.0\n """\n raise NotImplementedError\n\n\nclass LockAdapter(Lock):\n _internal_lock: Lock | None = None\n\n def __new__(cls, *, fast_acquire: bool = False) -> LockAdapter:\n return object.__new__(cls)\n\n def __init__(self, *, fast_acquire: bool = False):\n self._fast_acquire = fast_acquire\n\n @property\n def _lock(self) -> Lock:\n if self._internal_lock is None:\n self._internal_lock = get_async_backend().create_lock(\n fast_acquire=self._fast_acquire\n )\n\n return self._internal_lock\n\n async def __aenter__(self) -> None:\n await self._lock.acquire()\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> None:\n if self._internal_lock is not None:\n self._internal_lock.release()\n\n async def acquire(self) -> None:\n """Acquire the lock."""\n await self._lock.acquire()\n\n def acquire_nowait(self) -> None:\n """\n Acquire the lock, without blocking.\n\n :raises ~anyio.WouldBlock: if the operation would block\n\n """\n self._lock.acquire_nowait()\n\n def release(self) -> None:\n """Release the lock."""\n self._lock.release()\n\n def locked(self) -> bool:\n """Return True if the lock is currently held."""\n return self._lock.locked()\n\n def statistics(self) -> LockStatistics:\n """\n Return statistics about the current state of this lock.\n\n .. 
versionadded:: 3.0\n\n        """\n        if self._internal_lock is None:\n            return LockStatistics(False, None, 0)\n\n        return self._internal_lock.statistics()\n\n\nclass Condition:\n    _owner_task: TaskInfo | None = None\n\n    def __init__(self, lock: Lock | None = None):\n        self._lock = lock or Lock()\n        self._waiters: deque[Event] = deque()\n\n    async def __aenter__(self) -> None:\n        await self.acquire()\n\n    async def __aexit__(\n        self,\n        exc_type: type[BaseException] | None,\n        exc_val: BaseException | None,\n        exc_tb: TracebackType | None,\n    ) -> None:\n        self.release()\n\n    def _check_acquired(self) -> None:\n        if self._owner_task != get_current_task():\n            raise RuntimeError("The current task is not holding the underlying lock")\n\n    async def acquire(self) -> None:\n        """Acquire the underlying lock."""\n        await self._lock.acquire()\n        self._owner_task = get_current_task()\n\n    def acquire_nowait(self) -> None:\n        """\n        Acquire the underlying lock, without blocking.\n\n        :raises ~anyio.WouldBlock: if the operation would block\n\n        """\n        self._lock.acquire_nowait()\n        self._owner_task = get_current_task()\n\n    def release(self) -> None:\n        """Release the underlying lock."""\n        self._lock.release()\n\n    def locked(self) -> bool:\n        """Return True if the underlying lock is currently held."""\n        return self._lock.locked()\n\n    def notify(self, n: int = 1) -> None:\n        """Notify up to n of the waiting listeners."""\n        self._check_acquired()\n        for _ in range(n):\n            try:\n                event = self._waiters.popleft()\n            except IndexError:\n                break\n\n            event.set()\n\n    def notify_all(self) -> None:\n        """Notify all the listeners."""\n        self._check_acquired()\n        for event in self._waiters:\n            event.set()\n\n        self._waiters.clear()\n\n    async def wait(self) -> None:\n        """Wait for a notification."""\n        await checkpoint()\n        event = Event()\n        self._waiters.append(event)\n        self.release()\n        try:\n            await event.wait()\n        except BaseException:\n            if not event.is_set():\n                self._waiters.remove(event)\n\n            raise\n        finally:\n            with CancelScope(shield=True):\n                await self.acquire()\n\n    def statistics(self) -> ConditionStatistics:\n        """\n        Return statistics about the current state of this condition.\n\n        .. 
versionadded:: 3.0\n        """\n        return ConditionStatistics(len(self._waiters), self._lock.statistics())\n\n\nclass Semaphore:\n    def __new__(\n        cls,\n        initial_value: int,\n        *,\n        max_value: int | None = None,\n        fast_acquire: bool = False,\n    ) -> Semaphore:\n        try:\n            return get_async_backend().create_semaphore(\n                initial_value, max_value=max_value, fast_acquire=fast_acquire\n            )\n        except AsyncLibraryNotFoundError:\n            return SemaphoreAdapter(initial_value, max_value=max_value)\n\n    def __init__(\n        self,\n        initial_value: int,\n        *,\n        max_value: int | None = None,\n        fast_acquire: bool = False,\n    ):\n        if not isinstance(initial_value, int):\n            raise TypeError("initial_value must be an integer")\n        if initial_value < 0:\n            raise ValueError("initial_value must be >= 0")\n        if max_value is not None:\n            if not isinstance(max_value, int):\n                raise TypeError("max_value must be an integer or None")\n            if max_value < initial_value:\n                raise ValueError(\n                    "max_value must be equal to or higher than initial_value"\n                )\n\n        self._fast_acquire = fast_acquire\n\n    async def __aenter__(self) -> Semaphore:\n        await self.acquire()\n        return self\n\n    async def __aexit__(\n        self,\n        exc_type: type[BaseException] | None,\n        exc_val: BaseException | None,\n        exc_tb: TracebackType | None,\n    ) -> None:\n        self.release()\n\n    async def acquire(self) -> None:\n        """Decrement the semaphore value, blocking if necessary."""\n        raise NotImplementedError\n\n    def acquire_nowait(self) -> None:\n        """\n        Decrement the semaphore value, without blocking.\n\n        :raises ~anyio.WouldBlock: if the operation would block\n\n        """\n        raise NotImplementedError\n\n    def release(self) -> None:\n        """Increment the semaphore value."""\n        raise NotImplementedError\n\n    @property\n    def value(self) -> int:\n        """The current value of the semaphore."""\n        raise NotImplementedError\n\n    @property\n    def max_value(self) -> int | None:\n        """The maximum value of the semaphore."""\n        raise NotImplementedError\n\n    def statistics(self) -> SemaphoreStatistics:\n        """\n        Return statistics about the current state of this semaphore.\n\n        .. 
versionadded:: 3.0\n """\n raise NotImplementedError\n\n\nclass SemaphoreAdapter(Semaphore):\n _internal_semaphore: Semaphore | None = None\n\n def __new__(\n cls,\n initial_value: int,\n *,\n max_value: int | None = None,\n fast_acquire: bool = False,\n ) -> SemaphoreAdapter:\n return object.__new__(cls)\n\n def __init__(\n self,\n initial_value: int,\n *,\n max_value: int | None = None,\n fast_acquire: bool = False,\n ) -> None:\n super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire)\n self._initial_value = initial_value\n self._max_value = max_value\n\n @property\n def _semaphore(self) -> Semaphore:\n if self._internal_semaphore is None:\n self._internal_semaphore = get_async_backend().create_semaphore(\n self._initial_value, max_value=self._max_value\n )\n\n return self._internal_semaphore\n\n async def acquire(self) -> None:\n await self._semaphore.acquire()\n\n def acquire_nowait(self) -> None:\n self._semaphore.acquire_nowait()\n\n def release(self) -> None:\n self._semaphore.release()\n\n @property\n def value(self) -> int:\n if self._internal_semaphore is None:\n return self._initial_value\n\n return self._semaphore.value\n\n @property\n def max_value(self) -> int | None:\n return self._max_value\n\n def statistics(self) -> SemaphoreStatistics:\n if self._internal_semaphore is None:\n return SemaphoreStatistics(tasks_waiting=0)\n\n return self._semaphore.statistics()\n\n\nclass CapacityLimiter:\n def __new__(cls, total_tokens: float) -> CapacityLimiter:\n try:\n return get_async_backend().create_capacity_limiter(total_tokens)\n except AsyncLibraryNotFoundError:\n return CapacityLimiterAdapter(total_tokens)\n\n async def __aenter__(self) -> None:\n raise NotImplementedError\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> bool | None:\n raise NotImplementedError\n\n @property\n def total_tokens(self) -> float:\n """\n The total number of tokens available for borrowing.\n\n This is a read-write property. If the total number of tokens is increased, the\n proportionate number of tasks waiting on this limiter will be granted their\n tokens.\n\n .. 
versionchanged:: 3.0\n The property is now writable.\n\n """\n raise NotImplementedError\n\n @total_tokens.setter\n def total_tokens(self, value: float) -> None:\n raise NotImplementedError\n\n @property\n def borrowed_tokens(self) -> int:\n """The number of tokens that have currently been borrowed."""\n raise NotImplementedError\n\n @property\n def available_tokens(self) -> float:\n """The number of tokens currently available to be borrowed"""\n raise NotImplementedError\n\n def acquire_nowait(self) -> None:\n """\n Acquire a token for the current task without waiting for one to become\n available.\n\n :raises ~anyio.WouldBlock: if there are no tokens available for borrowing\n\n """\n raise NotImplementedError\n\n def acquire_on_behalf_of_nowait(self, borrower: object) -> None:\n """\n Acquire a token without waiting for one to become available.\n\n :param borrower: the entity borrowing a token\n :raises ~anyio.WouldBlock: if there are no tokens available for borrowing\n\n """\n raise NotImplementedError\n\n async def acquire(self) -> None:\n """\n Acquire a token for the current task, waiting if necessary for one to become\n available.\n\n """\n raise NotImplementedError\n\n async def acquire_on_behalf_of(self, borrower: object) -> None:\n """\n Acquire a token, waiting if necessary for one to become available.\n\n :param borrower: the entity borrowing a token\n\n """\n raise NotImplementedError\n\n def release(self) -> None:\n """\n Release the token held by the current task.\n\n :raises RuntimeError: if the current task has not borrowed a token from this\n limiter.\n\n """\n raise NotImplementedError\n\n def release_on_behalf_of(self, borrower: object) -> None:\n """\n Release the token held by the given borrower.\n\n :raises RuntimeError: if the borrower has not borrowed a token from this\n limiter.\n\n """\n raise NotImplementedError\n\n def statistics(self) -> CapacityLimiterStatistics:\n """\n Return statistics about the current state of this limiter.\n\n .. 
versionadded:: 3.0\n\n """\n raise NotImplementedError\n\n\nclass CapacityLimiterAdapter(CapacityLimiter):\n _internal_limiter: CapacityLimiter | None = None\n\n def __new__(cls, total_tokens: float) -> CapacityLimiterAdapter:\n return object.__new__(cls)\n\n def __init__(self, total_tokens: float) -> None:\n self.total_tokens = total_tokens\n\n @property\n def _limiter(self) -> CapacityLimiter:\n if self._internal_limiter is None:\n self._internal_limiter = get_async_backend().create_capacity_limiter(\n self._total_tokens\n )\n\n return self._internal_limiter\n\n async def __aenter__(self) -> None:\n await self._limiter.__aenter__()\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> bool | None:\n return await self._limiter.__aexit__(exc_type, exc_val, exc_tb)\n\n @property\n def total_tokens(self) -> float:\n if self._internal_limiter is None:\n return self._total_tokens\n\n return self._internal_limiter.total_tokens\n\n @total_tokens.setter\n def total_tokens(self, value: float) -> None:\n if not isinstance(value, int) and value is not math.inf:\n raise TypeError("total_tokens must be an int or math.inf")\n elif value < 1:\n raise ValueError("total_tokens must be >= 1")\n\n if self._internal_limiter is None:\n self._total_tokens = value\n return\n\n self._limiter.total_tokens = value\n\n @property\n def borrowed_tokens(self) -> int:\n if self._internal_limiter is None:\n return 0\n\n return self._internal_limiter.borrowed_tokens\n\n @property\n def available_tokens(self) -> float:\n if self._internal_limiter is None:\n return self._total_tokens\n\n return self._internal_limiter.available_tokens\n\n def acquire_nowait(self) -> None:\n self._limiter.acquire_nowait()\n\n def acquire_on_behalf_of_nowait(self, borrower: object) -> None:\n self._limiter.acquire_on_behalf_of_nowait(borrower)\n\n async def acquire(self) -> None:\n await self._limiter.acquire()\n\n async def acquire_on_behalf_of(self, borrower: object) -> None:\n await self._limiter.acquire_on_behalf_of(borrower)\n\n def release(self) -> None:\n self._limiter.release()\n\n def release_on_behalf_of(self, borrower: object) -> None:\n self._limiter.release_on_behalf_of(borrower)\n\n def statistics(self) -> CapacityLimiterStatistics:\n if self._internal_limiter is None:\n return CapacityLimiterStatistics(\n borrowed_tokens=0,\n total_tokens=self.total_tokens,\n borrowers=(),\n tasks_waiting=0,\n )\n\n return self._internal_limiter.statistics()\n\n\nclass ResourceGuard:\n """\n A context manager for ensuring that a resource is only used by a single task at a\n time.\n\n Entering this context manager while the previous has not exited it yet will trigger\n :exc:`BusyResourceError`.\n\n :param action: the action to guard against (visible in the :exc:`BusyResourceError`\n when triggered, e.g. "Another task is already {action} this resource")\n\n .. versionadded:: 4.1\n """\n\n __slots__ = "action", "_guarded"\n\n def __init__(self, action: str = "using"):\n self.action: str = action\n self._guarded = False\n\n def __enter__(self) -> None:\n if self._guarded:\n raise BusyResourceError(self.action)\n\n self._guarded = True\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> None:\n self._guarded = False\n
|
.venv\Lib\site-packages\anyio\_core\_synchronization.py
|
_synchronization.py
|
Python
| 20,320 | 0.85 | 0.236339 | 0.007286 |
react-lib
| 849 |
2024-02-08T07:29:35.191424
|
BSD-3-Clause
| false |
5303f6905f631b068a1dc2482ed0588e
|
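The record above dumps anyio's backend-agnostic synchronization primitives. A minimal usage sketch follows (not part of the dumped file; assumes anyio is installed and uses only the public API defined above):

import anyio


async def worker(num: int, limiter: anyio.CapacityLimiter, lock: anyio.Lock) -> None:
    async with limiter:  # borrow one of the limiter's tokens
        async with lock:  # serialize access to shared state
            print(f"worker {num}: {limiter.borrowed_tokens}/{limiter.total_tokens} tokens borrowed")


async def main() -> None:
    lock = anyio.Lock()
    limiter = anyio.CapacityLimiter(2)  # at most two concurrent borrowers
    async with anyio.create_task_group() as tg:
        for i in range(5):
            tg.start_soon(worker, i, limiter, lock)


anyio.run(main)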
from __future__ import annotations\n\nimport math\nfrom collections.abc import Generator\nfrom contextlib import contextmanager\nfrom types import TracebackType\n\nfrom ..abc._tasks import TaskGroup, TaskStatus\nfrom ._eventloop import get_async_backend\n\n\nclass _IgnoredTaskStatus(TaskStatus[object]):\n    def started(self, value: object = None) -> None:\n        pass\n\n\nTASK_STATUS_IGNORED = _IgnoredTaskStatus()\n\n\nclass CancelScope:\n    """\n    Wraps a unit of work that can be made separately cancellable.\n\n    :param deadline: The time (clock value) when this scope is cancelled automatically\n    :param shield: ``True`` to shield the cancel scope from external cancellation\n    """\n\n    def __new__(\n        cls, *, deadline: float = math.inf, shield: bool = False\n    ) -> CancelScope:\n        return get_async_backend().create_cancel_scope(shield=shield, deadline=deadline)\n\n    def cancel(self) -> None:\n        """Cancel this scope immediately."""\n        raise NotImplementedError\n\n    @property\n    def deadline(self) -> float:\n        """\n        The time (clock value) when this scope is cancelled automatically.\n\n        Will be ``float('inf')`` if no timeout has been set.\n\n        """\n        raise NotImplementedError\n\n    @deadline.setter\n    def deadline(self, value: float) -> None:\n        raise NotImplementedError\n\n    @property\n    def cancel_called(self) -> bool:\n        """``True`` if :meth:`cancel` has been called."""\n        raise NotImplementedError\n\n    @property\n    def cancelled_caught(self) -> bool:\n        """\n        ``True`` if this scope suppressed a cancellation exception it itself raised.\n\n        This is typically used to check if any work was interrupted, or to see if the\n        scope was cancelled due to its deadline being reached. The value will, however,\n        only be ``True`` if the cancellation was triggered by the scope itself (and not\n        an outer scope).\n\n        """\n        raise NotImplementedError\n\n    @property\n    def shield(self) -> bool:\n        """\n        ``True`` if this scope is shielded from external cancellation.\n\n        While a scope is shielded, it will not receive cancellations from outside.\n\n        """\n        raise NotImplementedError\n\n    @shield.setter\n    def shield(self, value: bool) -> None:\n        raise NotImplementedError\n\n    def __enter__(self) -> CancelScope:\n        raise NotImplementedError\n\n    def __exit__(\n        self,\n        exc_type: type[BaseException] | None,\n        exc_val: BaseException | None,\n        exc_tb: TracebackType | None,\n    ) -> bool:\n        raise NotImplementedError\n\n\n@contextmanager\ndef fail_after(\n    delay: float | None, shield: bool = False\n) -> Generator[CancelScope, None, None]:\n    """\n    Create a context manager which raises a :class:`TimeoutError` if the block does\n    not finish in time.\n\n    :param delay: maximum allowed time (in seconds) before raising the exception, or\n        ``None`` to disable the timeout\n    :param shield: ``True`` to shield the cancel scope from external cancellation\n    :return: a context manager that yields a cancel scope\n    :rtype: :class:`~typing.ContextManager`\[:class:`~anyio.CancelScope`\]\n\n    """\n    current_time = get_async_backend().current_time\n    deadline = (current_time() + delay) if delay is not None else math.inf\n    with get_async_backend().create_cancel_scope(\n        deadline=deadline, shield=shield\n    ) as cancel_scope:\n        yield cancel_scope\n\n    if cancel_scope.cancelled_caught and current_time() >= cancel_scope.deadline:\n        raise TimeoutError\n\n\ndef move_on_after(delay: float | None, shield: bool = False) -> CancelScope:\n    """\n    Create a cancel scope with a deadline that expires after the given delay.\n\n    :param delay: maximum allowed time (in seconds) before exiting the context block, or\n        
``None`` to disable the timeout\n :param shield: ``True`` to shield the cancel scope from external cancellation\n :return: a cancel scope\n\n """\n deadline = (\n (get_async_backend().current_time() + delay) if delay is not None else math.inf\n )\n return get_async_backend().create_cancel_scope(deadline=deadline, shield=shield)\n\n\ndef current_effective_deadline() -> float:\n """\n Return the nearest deadline among all the cancel scopes effective for the current\n task.\n\n :return: a clock value from the event loop's internal clock (or ``float('inf')`` if\n there is no deadline in effect, or ``float('-inf')`` if the current scope has\n been cancelled)\n :rtype: float\n\n """\n return get_async_backend().current_effective_deadline()\n\n\ndef create_task_group() -> TaskGroup:\n """\n Create a task group.\n\n :return: a task group\n\n """\n return get_async_backend().create_task_group()\n
|
.venv\Lib\site-packages\anyio\_core\_tasks.py
|
_tasks.py
|
Python
| 4,757 | 0.85 | 0.21519 | 0 |
python-kit
| 881 |
2025-02-06T17:58:46.444230
|
BSD-3-Clause
| false |
3d8e2501ee3037118ba5504b78194259
|
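A short sketch of the two timeout helpers defined above (not part of the dump; assumes anyio is installed, and TimeoutError is the stdlib exception):

import anyio


async def main() -> None:
    # fail_after() raises TimeoutError when the block overruns its deadline.
    try:
        with anyio.fail_after(0.1):
            await anyio.sleep(1)
    except TimeoutError:
        print("timed out")

    # move_on_after() cancels the block silently instead of raising.
    with anyio.move_on_after(0.1) as scope:
        await anyio.sleep(1)
    print("cancelled:", scope.cancelled_caught)  # True


anyio.run(main)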
from __future__ import annotations\n\nimport os\nimport sys\nimport tempfile\nfrom collections.abc import Iterable\nfrom io import BytesIO, TextIOWrapper\nfrom types import TracebackType\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AnyStr,\n Generic,\n overload,\n)\n\nfrom .. import to_thread\nfrom .._core._fileio import AsyncFile\nfrom ..lowlevel import checkpoint_if_cancelled\n\nif TYPE_CHECKING:\n from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer\n\n\nclass TemporaryFile(Generic[AnyStr]):\n """\n An asynchronous temporary file that is automatically created and cleaned up.\n\n This class provides an asynchronous context manager interface to a temporary file.\n The file is created using Python's standard `tempfile.TemporaryFile` function in a\n background thread, and is wrapped as an asynchronous file using `AsyncFile`.\n\n :param mode: The mode in which the file is opened. Defaults to "w+b".\n :param buffering: The buffering policy (-1 means the default buffering).\n :param encoding: The encoding used to decode or encode the file. Only applicable in\n text mode.\n :param newline: Controls how universal newlines mode works (only applicable in text\n mode).\n :param suffix: The suffix for the temporary file name.\n :param prefix: The prefix for the temporary file name.\n :param dir: The directory in which the temporary file is created.\n :param errors: The error handling scheme used for encoding/decoding errors.\n """\n\n _async_file: AsyncFile[AnyStr]\n\n @overload\n def __init__(\n self: TemporaryFile[bytes],\n mode: OpenBinaryMode = ...,\n buffering: int = ...,\n encoding: str | None = ...,\n newline: str | None = ...,\n suffix: str | None = ...,\n prefix: str | None = ...,\n dir: str | None = ...,\n *,\n errors: str | None = ...,\n ): ...\n @overload\n def __init__(\n self: TemporaryFile[str],\n mode: OpenTextMode,\n buffering: int = ...,\n encoding: str | None = ...,\n newline: str | None = ...,\n suffix: str | None = ...,\n prefix: str | None = ...,\n dir: str | None = ...,\n *,\n errors: str | None = ...,\n ): ...\n\n def __init__(\n self,\n mode: OpenTextMode | OpenBinaryMode = "w+b",\n buffering: int = -1,\n encoding: str | None = None,\n newline: str | None = None,\n suffix: str | None = None,\n prefix: str | None = None,\n dir: str | None = None,\n *,\n errors: str | None = None,\n ) -> None:\n self.mode = mode\n self.buffering = buffering\n self.encoding = encoding\n self.newline = newline\n self.suffix: str | None = suffix\n self.prefix: str | None = prefix\n self.dir: str | None = dir\n self.errors = errors\n\n async def __aenter__(self) -> AsyncFile[AnyStr]:\n fp = await to_thread.run_sync(\n lambda: tempfile.TemporaryFile(\n self.mode,\n self.buffering,\n self.encoding,\n self.newline,\n self.suffix,\n self.prefix,\n self.dir,\n errors=self.errors,\n )\n )\n self._async_file = AsyncFile(fp)\n return self._async_file\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None,\n exc_value: BaseException | None,\n traceback: TracebackType | None,\n ) -> None:\n await self._async_file.aclose()\n\n\nclass NamedTemporaryFile(Generic[AnyStr]):\n """\n An asynchronous named temporary file that is automatically created and cleaned up.\n\n This class provides an asynchronous context manager for a temporary file with a\n visible name in the file system. 
It uses Python's standard\n :func:`~tempfile.NamedTemporaryFile` function and wraps the file object with\n :class:`AsyncFile` for asynchronous operations.\n\n :param mode: The mode in which the file is opened. Defaults to "w+b".\n :param buffering: The buffering policy (-1 means the default buffering).\n :param encoding: The encoding used to decode or encode the file. Only applicable in\n text mode.\n :param newline: Controls how universal newlines mode works (only applicable in text\n mode).\n :param suffix: The suffix for the temporary file name.\n :param prefix: The prefix for the temporary file name.\n :param dir: The directory in which the temporary file is created.\n :param delete: Whether to delete the file when it is closed.\n :param errors: The error handling scheme used for encoding/decoding errors.\n :param delete_on_close: (Python 3.12+) Whether to delete the file on close.\n """\n\n _async_file: AsyncFile[AnyStr]\n\n @overload\n def __init__(\n self: NamedTemporaryFile[bytes],\n mode: OpenBinaryMode = ...,\n buffering: int = ...,\n encoding: str | None = ...,\n newline: str | None = ...,\n suffix: str | None = ...,\n prefix: str | None = ...,\n dir: str | None = ...,\n delete: bool = ...,\n *,\n errors: str | None = ...,\n delete_on_close: bool = ...,\n ): ...\n @overload\n def __init__(\n self: NamedTemporaryFile[str],\n mode: OpenTextMode,\n buffering: int = ...,\n encoding: str | None = ...,\n newline: str | None = ...,\n suffix: str | None = ...,\n prefix: str | None = ...,\n dir: str | None = ...,\n delete: bool = ...,\n *,\n errors: str | None = ...,\n delete_on_close: bool = ...,\n ): ...\n\n def __init__(\n self,\n mode: OpenBinaryMode | OpenTextMode = "w+b",\n buffering: int = -1,\n encoding: str | None = None,\n newline: str | None = None,\n suffix: str | None = None,\n prefix: str | None = None,\n dir: str | None = None,\n delete: bool = True,\n *,\n errors: str | None = None,\n delete_on_close: bool = True,\n ) -> None:\n self._params: dict[str, Any] = {\n "mode": mode,\n "buffering": buffering,\n "encoding": encoding,\n "newline": newline,\n "suffix": suffix,\n "prefix": prefix,\n "dir": dir,\n "delete": delete,\n "errors": errors,\n }\n if sys.version_info >= (3, 12):\n self._params["delete_on_close"] = delete_on_close\n\n async def __aenter__(self) -> AsyncFile[AnyStr]:\n fp = await to_thread.run_sync(\n lambda: tempfile.NamedTemporaryFile(**self._params)\n )\n self._async_file = AsyncFile(fp)\n return self._async_file\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None,\n exc_value: BaseException | None,\n traceback: TracebackType | None,\n ) -> None:\n await self._async_file.aclose()\n\n\nclass SpooledTemporaryFile(AsyncFile[AnyStr]):\n """\n An asynchronous spooled temporary file that starts in memory and is spooled to disk.\n\n This class provides an asynchronous interface to a spooled temporary file, much like\n Python's standard :class:`~tempfile.SpooledTemporaryFile`. It supports asynchronous\n write operations and provides a method to force a rollover to disk.\n\n :param max_size: Maximum size in bytes before the file is rolled over to disk.\n :param mode: The mode in which the file is opened. 
Defaults to "w+b".\n :param buffering: The buffering policy (-1 means the default buffering).\n :param encoding: The encoding used to decode or encode the file (text mode only).\n :param newline: Controls how universal newlines mode works (text mode only).\n :param suffix: The suffix for the temporary file name.\n :param prefix: The prefix for the temporary file name.\n :param dir: The directory in which the temporary file is created.\n :param errors: The error handling scheme used for encoding/decoding errors.\n """\n\n _rolled: bool = False\n\n @overload\n def __init__(\n self: SpooledTemporaryFile[bytes],\n max_size: int = ...,\n mode: OpenBinaryMode = ...,\n buffering: int = ...,\n encoding: str | None = ...,\n newline: str | None = ...,\n suffix: str | None = ...,\n prefix: str | None = ...,\n dir: str | None = ...,\n *,\n errors: str | None = ...,\n ): ...\n @overload\n def __init__(\n self: SpooledTemporaryFile[str],\n max_size: int = ...,\n mode: OpenTextMode = ...,\n buffering: int = ...,\n encoding: str | None = ...,\n newline: str | None = ...,\n suffix: str | None = ...,\n prefix: str | None = ...,\n dir: str | None = ...,\n *,\n errors: str | None = ...,\n ): ...\n\n def __init__(\n self,\n max_size: int = 0,\n mode: OpenBinaryMode | OpenTextMode = "w+b",\n buffering: int = -1,\n encoding: str | None = None,\n newline: str | None = None,\n suffix: str | None = None,\n prefix: str | None = None,\n dir: str | None = None,\n *,\n errors: str | None = None,\n ) -> None:\n self._tempfile_params: dict[str, Any] = {\n "mode": mode,\n "buffering": buffering,\n "encoding": encoding,\n "newline": newline,\n "suffix": suffix,\n "prefix": prefix,\n "dir": dir,\n "errors": errors,\n }\n self._max_size = max_size\n if "b" in mode:\n super().__init__(BytesIO()) # type: ignore[arg-type]\n else:\n super().__init__(\n TextIOWrapper( # type: ignore[arg-type]\n BytesIO(),\n encoding=encoding,\n errors=errors,\n newline=newline,\n write_through=True,\n )\n )\n\n async def aclose(self) -> None:\n if not self._rolled:\n self._fp.close()\n return\n\n await super().aclose()\n\n async def _check(self) -> None:\n if self._rolled or self._fp.tell() < self._max_size:\n return\n\n await self.rollover()\n\n async def rollover(self) -> None:\n if self._rolled:\n return\n\n self._rolled = True\n buffer = self._fp\n buffer.seek(0)\n self._fp = await to_thread.run_sync(\n lambda: tempfile.TemporaryFile(**self._tempfile_params)\n )\n await self.write(buffer.read())\n buffer.close()\n\n @property\n def closed(self) -> bool:\n return self._fp.closed\n\n async def read(self, size: int = -1) -> AnyStr:\n if not self._rolled:\n await checkpoint_if_cancelled()\n return self._fp.read(size)\n\n return await super().read(size) # type: ignore[return-value]\n\n async def read1(self: SpooledTemporaryFile[bytes], size: int = -1) -> bytes:\n if not self._rolled:\n await checkpoint_if_cancelled()\n return self._fp.read1(size)\n\n return await super().read1(size)\n\n async def readline(self) -> AnyStr:\n if not self._rolled:\n await checkpoint_if_cancelled()\n return self._fp.readline()\n\n return await super().readline() # type: ignore[return-value]\n\n async def readlines(self) -> list[AnyStr]:\n if not self._rolled:\n await checkpoint_if_cancelled()\n return self._fp.readlines()\n\n return await super().readlines() # type: ignore[return-value]\n\n async def readinto(self: SpooledTemporaryFile[bytes], b: WriteableBuffer) -> int:\n if not self._rolled:\n await checkpoint_if_cancelled()\n self._fp.readinto(b)\n\n return await 
super().readinto(b)\n\n async def readinto1(self: SpooledTemporaryFile[bytes], b: WriteableBuffer) -> int:\n if not self._rolled:\n await checkpoint_if_cancelled()\n self._fp.readinto(b)\n\n return await super().readinto1(b)\n\n async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:\n if not self._rolled:\n await checkpoint_if_cancelled()\n return self._fp.seek(offset, whence)\n\n return await super().seek(offset, whence)\n\n async def tell(self) -> int:\n if not self._rolled:\n await checkpoint_if_cancelled()\n return self._fp.tell()\n\n return await super().tell()\n\n async def truncate(self, size: int | None = None) -> int:\n if not self._rolled:\n await checkpoint_if_cancelled()\n return self._fp.truncate(size)\n\n return await super().truncate(size)\n\n @overload\n async def write(self: SpooledTemporaryFile[bytes], b: ReadableBuffer) -> int: ...\n @overload\n async def write(self: SpooledTemporaryFile[str], b: str) -> int: ...\n\n async def write(self, b: ReadableBuffer | str) -> int:\n """\n Asynchronously write data to the spooled temporary file.\n\n If the file has not yet been rolled over, the data is written synchronously,\n and a rollover is triggered if the size exceeds the maximum size.\n\n :param s: The data to write.\n :return: The number of bytes written.\n :raises RuntimeError: If the underlying file is not initialized.\n\n """\n if not self._rolled:\n await checkpoint_if_cancelled()\n result = self._fp.write(b)\n await self._check()\n return result\n\n return await super().write(b) # type: ignore[misc]\n\n @overload\n async def writelines(\n self: SpooledTemporaryFile[bytes], lines: Iterable[ReadableBuffer]\n ) -> None: ...\n @overload\n async def writelines(\n self: SpooledTemporaryFile[str], lines: Iterable[str]\n ) -> None: ...\n\n async def writelines(self, lines: Iterable[str] | Iterable[ReadableBuffer]) -> None:\n """\n Asynchronously write a list of lines to the spooled temporary file.\n\n If the file has not yet been rolled over, the lines are written synchronously,\n and a rollover is triggered if the size exceeds the maximum size.\n\n :param lines: An iterable of lines to write.\n :raises RuntimeError: If the underlying file is not initialized.\n\n """\n if not self._rolled:\n await checkpoint_if_cancelled()\n result = self._fp.writelines(lines)\n await self._check()\n return result\n\n return await super().writelines(lines) # type: ignore[misc]\n\n\nclass TemporaryDirectory(Generic[AnyStr]):\n """\n An asynchronous temporary directory that is created and cleaned up automatically.\n\n This class provides an asynchronous context manager for creating a temporary\n directory. 
It wraps Python's standard :class:`~tempfile.TemporaryDirectory` to\n perform directory creation and cleanup operations in a background thread.\n\n :param suffix: Suffix to be added to the temporary directory name.\n :param prefix: Prefix to be added to the temporary directory name.\n :param dir: The parent directory where the temporary directory is created.\n :param ignore_cleanup_errors: Whether to ignore errors during cleanup\n (Python 3.10+).\n :param delete: Whether to delete the directory upon closing (Python 3.12+).\n """\n\n def __init__(\n self,\n suffix: AnyStr | None = None,\n prefix: AnyStr | None = None,\n dir: AnyStr | None = None,\n *,\n ignore_cleanup_errors: bool = False,\n delete: bool = True,\n ) -> None:\n self.suffix: AnyStr | None = suffix\n self.prefix: AnyStr | None = prefix\n self.dir: AnyStr | None = dir\n self.ignore_cleanup_errors = ignore_cleanup_errors\n self.delete = delete\n\n self._tempdir: tempfile.TemporaryDirectory | None = None\n\n async def __aenter__(self) -> str:\n params: dict[str, Any] = {\n "suffix": self.suffix,\n "prefix": self.prefix,\n "dir": self.dir,\n }\n if sys.version_info >= (3, 10):\n params["ignore_cleanup_errors"] = self.ignore_cleanup_errors\n\n if sys.version_info >= (3, 12):\n params["delete"] = self.delete\n\n self._tempdir = await to_thread.run_sync(\n lambda: tempfile.TemporaryDirectory(**params)\n )\n return await to_thread.run_sync(self._tempdir.__enter__)\n\n async def __aexit__(\n self,\n exc_type: type[BaseException] | None,\n exc_value: BaseException | None,\n traceback: TracebackType | None,\n ) -> None:\n if self._tempdir is not None:\n await to_thread.run_sync(\n self._tempdir.__exit__, exc_type, exc_value, traceback\n )\n\n async def cleanup(self) -> None:\n if self._tempdir is not None:\n await to_thread.run_sync(self._tempdir.cleanup)\n\n\n@overload\nasync def mkstemp(\n suffix: str | None = None,\n prefix: str | None = None,\n dir: str | None = None,\n text: bool = False,\n) -> tuple[int, str]: ...\n\n\n@overload\nasync def mkstemp(\n suffix: bytes | None = None,\n prefix: bytes | None = None,\n dir: bytes | None = None,\n text: bool = False,\n) -> tuple[int, bytes]: ...\n\n\nasync def mkstemp(\n suffix: AnyStr | None = None,\n prefix: AnyStr | None = None,\n dir: AnyStr | None = None,\n text: bool = False,\n) -> tuple[int, str | bytes]:\n """\n Asynchronously create a temporary file and return an OS-level handle and the file\n name.\n\n This function wraps `tempfile.mkstemp` and executes it in a background thread.\n\n :param suffix: Suffix to be added to the file name.\n :param prefix: Prefix to be added to the file name.\n :param dir: Directory in which the temporary file is created.\n :param text: Whether the file is opened in text mode.\n :return: A tuple containing the file descriptor and the file name.\n\n """\n return await to_thread.run_sync(tempfile.mkstemp, suffix, prefix, dir, text)\n\n\n@overload\nasync def mkdtemp(\n suffix: str | None = None,\n prefix: str | None = None,\n dir: str | None = None,\n) -> str: ...\n\n\n@overload\nasync def mkdtemp(\n suffix: bytes | None = None,\n prefix: bytes | None = None,\n dir: bytes | None = None,\n) -> bytes: ...\n\n\nasync def mkdtemp(\n suffix: AnyStr | None = None,\n prefix: AnyStr | None = None,\n dir: AnyStr | None = None,\n) -> str | bytes:\n """\n Asynchronously create a temporary directory and return its path.\n\n This function wraps `tempfile.mkdtemp` and executes it in a background thread.\n\n :param suffix: Suffix to be added to the directory name.\n 
:param prefix: Prefix to be added to the directory name.\n :param dir: Parent directory where the temporary directory is created.\n :return: The path of the created temporary directory.\n\n """\n return await to_thread.run_sync(tempfile.mkdtemp, suffix, prefix, dir)\n\n\nasync def gettempdir() -> str:\n """\n Asynchronously return the name of the directory used for temporary files.\n\n This function wraps `tempfile.gettempdir` and executes it in a background thread.\n\n :return: The path of the temporary directory as a string.\n\n """\n return await to_thread.run_sync(tempfile.gettempdir)\n\n\nasync def gettempdirb() -> bytes:\n """\n Asynchronously return the name of the directory used for temporary files in bytes.\n\n This function wraps `tempfile.gettempdirb` and executes it in a background thread.\n\n :return: The path of the temporary directory as bytes.\n\n """\n return await to_thread.run_sync(tempfile.gettempdirb)\n
|
.venv\Lib\site-packages\anyio\_core\_tempfile.py
|
_tempfile.py
|
Python
| 19,696 | 0.95 | 0.159091 | 0.019531 |
react-lib
| 369 |
2024-06-17T06:10:24.163034
|
Apache-2.0
| false |
c9dba88b12fb1cde94ceda4d974840f9
|
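A usage sketch for the async temporary-file wrappers defined above (not part of the dump; assumes anyio >= 4.9, the release that added these names to the top-level anyio namespace):

import anyio
from anyio import NamedTemporaryFile, SpooledTemporaryFile


async def main() -> None:
    async with NamedTemporaryFile[bytes]() as f:
        await f.write(b"hello")
        await f.seek(0)
        print(await f.read())  # b'hello'

    # A spooled file lives in memory until max_size is exceeded, then rolls
    # over to a real temporary file on disk.
    async with SpooledTemporaryFile[bytes](max_size=16) as spooled:
        await spooled.write(b"x" * 32)  # exceeds max_size, triggers rollover


anyio.run(main)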
from __future__ import annotations\n\nfrom collections.abc import Awaitable, Generator\nfrom typing import Any, cast\n\nfrom ._eventloop import get_async_backend\n\n\nclass TaskInfo:\n """\n Represents an asynchronous task.\n\n :ivar int id: the unique identifier of the task\n :ivar parent_id: the identifier of the parent task, if any\n :vartype parent_id: Optional[int]\n :ivar str name: the description of the task (if any)\n :ivar ~collections.abc.Coroutine coro: the coroutine object of the task\n """\n\n __slots__ = "_name", "id", "parent_id", "name", "coro"\n\n def __init__(\n self,\n id: int,\n parent_id: int | None,\n name: str | None,\n coro: Generator[Any, Any, Any] | Awaitable[Any],\n ):\n func = get_current_task\n self._name = f"{func.__module__}.{func.__qualname__}"\n self.id: int = id\n self.parent_id: int | None = parent_id\n self.name: str | None = name\n self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, TaskInfo):\n return self.id == other.id\n\n return NotImplemented\n\n def __hash__(self) -> int:\n return hash(self.id)\n\n def __repr__(self) -> str:\n return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})"\n\n def has_pending_cancellation(self) -> bool:\n """\n Return ``True`` if the task has a cancellation pending, ``False`` otherwise.\n\n """\n return False\n\n\ndef get_current_task() -> TaskInfo:\n """\n Return the current task.\n\n :return: a representation of the current task\n\n """\n return get_async_backend().get_current_task()\n\n\ndef get_running_tasks() -> list[TaskInfo]:\n """\n Return a list of running tasks in the current event loop.\n\n :return: a list of task info objects\n\n """\n return cast("list[TaskInfo]", get_async_backend().get_running_tasks())\n\n\nasync def wait_all_tasks_blocked() -> None:\n """Wait until all other tasks are waiting for something."""\n await get_async_backend().wait_all_tasks_blocked()\n
|
.venv\Lib\site-packages\anyio\_core\_testing.py
|
_testing.py
|
Python
| 2,118 | 0.85 | 0.179487 | 0 |
awesome-app
| 976 |
2024-08-04T19:14:15.660475
|
MIT
| true |
6456eda4fe7936943a49da6f7656f7b0
|
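A sketch of the task-introspection helpers defined above (not part of the dump; assumes anyio is installed, and both functions are re-exported from the top-level anyio package):

import anyio
from anyio import get_current_task, get_running_tasks


async def main() -> None:
    me = get_current_task()
    print(me.id, me.parent_id, me.name)
    print(len(get_running_tasks()), "task(s) running")


anyio.run(main)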
from __future__ import annotations\n\nfrom collections.abc import Callable, Mapping\nfrom typing import Any, TypeVar, final, overload\n\nfrom ._exceptions import TypedAttributeLookupError\n\nT_Attr = TypeVar("T_Attr")\nT_Default = TypeVar("T_Default")\nundefined = object()\n\n\ndef typed_attribute() -> Any:\n """Return a unique object, used to mark typed attributes."""\n return object()\n\n\nclass TypedAttributeSet:\n """\n Superclass for typed attribute collections.\n\n Checks that every public attribute of every subclass has a type annotation.\n """\n\n def __init_subclass__(cls) -> None:\n annotations: dict[str, Any] = getattr(cls, "__annotations__", {})\n for attrname in dir(cls):\n if not attrname.startswith("_") and attrname not in annotations:\n raise TypeError(\n f"Attribute {attrname!r} is missing its type annotation"\n )\n\n super().__init_subclass__()\n\n\nclass TypedAttributeProvider:\n """Base class for classes that wish to provide typed extra attributes."""\n\n @property\n def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:\n """\n A mapping of the extra attributes to callables that return the corresponding\n values.\n\n If the provider wraps another provider, the attributes from that wrapper should\n also be included in the returned mapping (but the wrapper may override the\n callables from the wrapped instance).\n\n """\n return {}\n\n @overload\n def extra(self, attribute: T_Attr) -> T_Attr: ...\n\n @overload\n def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default: ...\n\n @final\n def extra(self, attribute: Any, default: object = undefined) -> object:\n """\n extra(attribute, default=undefined)\n\n Return the value of the given typed extra attribute.\n\n :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to\n look for\n :param default: the value that should be returned if no value is found for the\n attribute\n :raises ~anyio.TypedAttributeLookupError: if the search failed and no default\n value was given\n\n """\n try:\n getter = self.extra_attributes[attribute]\n except KeyError:\n if default is undefined:\n raise TypedAttributeLookupError("Attribute not found") from None\n else:\n return default\n\n return getter()\n
|
.venv\Lib\site-packages\anyio\_core\_typedattr.py
|
_typedattr.py
|
Python
| 2,508 | 0.85 | 0.246914 | 0 |
python-kit
| 87 |
2024-06-07T18:16:05.698239
|
Apache-2.0
| false |
f2e4ac2044a721fb49edd8b3f486c4a5
|
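A sketch of the typed-attribute machinery defined above. The names HTTPAttributes and Response are invented for illustration; the anyio imports are the real public API:

from anyio import TypedAttributeProvider, TypedAttributeSet, typed_attribute


class HTTPAttributes(TypedAttributeSet):
    # every public attribute of a TypedAttributeSet must carry a type annotation
    status_code: int = typed_attribute()


class Response(TypedAttributeProvider):
    @property
    def extra_attributes(self):
        # map attribute markers to zero-argument callables producing the values
        return {HTTPAttributes.status_code: lambda: 200}


resp = Response()
print(resp.extra(HTTPAttributes.status_code))        # 200
print(resp.extra(typed_attribute(), default="n/a"))  # unknown attribute -> default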
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_asyncio_selector_thread.cpython-313.pyc
|
_asyncio_selector_thread.cpython-313.pyc
|
Other
| 8,634 | 0.8 | 0.021505 | 0 |
vue-tools
| 343 |
2024-06-24T02:40:43.116166
|
BSD-3-Clause
| false |
56bc579a8264e90fc7d88d99e4285764
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_eventloop.cpython-313.pyc
|
_eventloop.cpython-313.pyc
|
Other
| 6,297 | 0.95 | 0.093023 | 0.013889 |
vue-tools
| 908 |
2024-08-29T08:35:32.938178
|
GPL-3.0
| false |
e877cd138c6f52630deaa0a9aa41c84c
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_exceptions.cpython-313.pyc
|
_exceptions.cpython-313.pyc
|
Other
| 6,269 | 0.95 | 0.059524 | 0 |
vue-tools
| 647 |
2024-09-29T05:26:17.043345
|
BSD-3-Clause
| false |
8058e14f919283aab3d8298a45350064
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_fileio.cpython-313.pyc
|
_fileio.cpython-313.pyc
|
Other
| 41,514 | 0.95 | 0.037736 | 0.201835 |
awesome-app
| 661 |
2024-01-19T02:57:26.133406
|
GPL-3.0
| false |
4d5947ae61699602fb197a342edc7e6c
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_resources.cpython-313.pyc
|
_resources.cpython-313.pyc
|
Other
| 922 | 0.8 | 0 | 0 |
react-lib
| 695 |
2024-10-29T00:02:36.481382
|
GPL-3.0
| false |
ae5881d0f3255050f1cf49345a182fb5
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_signals.cpython-313.pyc
|
_signals.cpython-313.pyc
|
Other
| 1,234 | 0.7 | 0.074074 | 0 |
awesome-app
| 791 |
2023-09-15T05:37:40.736997
|
GPL-3.0
| false |
5c5c6d7c4f926f4e3747ee70259a67e0
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_sockets.cpython-313.pyc
|
_sockets.cpython-313.pyc
|
Other
| 30,985 | 0.95 | 0.092975 | 0.004854 |
vue-tools
| 610 |
2024-05-02T03:52:09.316756
|
GPL-3.0
| false |
aff0c38b610207b7606caf4f1086130d
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_streams.cpython-313.pyc
|
_streams.cpython-313.pyc
|
Other
| 2,373 | 0.8 | 0.030303 | 0 |
vue-tools
| 725 |
2024-08-28T12:54:38.095041
|
Apache-2.0
| false |
ecb4c037c452c9f921dfa51e6b8642f2
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_subprocesses.cpython-313.pyc
|
_subprocesses.cpython-313.pyc
|
Other
| 9,445 | 0.8 | 0.119403 | 0 |
awesome-app
| 533 |
2024-07-21T11:35:51.314963
|
GPL-3.0
| false |
78850f23d2473166d6c3172982628c3d
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_synchronization.cpython-313.pyc
|
_synchronization.cpython-313.pyc
|
Other
| 32,215 | 0.8 | 0.099644 | 0 |
awesome-app
| 835 |
2024-07-19T12:20:51.496442
|
Apache-2.0
| false |
57b189aa86b7ecd5beebac354c5199ed
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_tasks.cpython-313.pyc
|
_tasks.cpython-313.pyc
|
Other
| 6,870 | 0.8 | 0.12963 | 0 |
python-kit
| 58 |
2025-05-14T18:36:02.868256
|
Apache-2.0
| false |
191f47697be434ff9901270956c255d8
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_tempfile.cpython-313.pyc
|
_tempfile.cpython-313.pyc
|
Other
| 27,902 | 0.95 | 0.090062 | 0.003448 |
node-utils
| 919 |
2024-05-03T19:40:24.943947
|
Apache-2.0
| false |
637641d75b665a0a1206b2e285da854c
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_testing.cpython-313.pyc
|
_testing.cpython-313.pyc
|
Other
| 3,594 | 0.8 | 0.085106 | 0 |
react-lib
| 24 |
2024-11-30T18:32:10.008138
|
Apache-2.0
| true |
846bf8330068cfb06c7e293e3a2c0256
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\_typedattr.cpython-313.pyc
|
_typedattr.cpython-313.pyc
|
Other
| 3,776 | 0.95 | 0.166667 | 0 |
python-kit
| 569 |
2025-06-26T03:03:18.045593
|
Apache-2.0
| false |
00347416d95e28c823f55cd357e4ecdc
|
\n\n
|
.venv\Lib\site-packages\anyio\_core\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 186 | 0.7 | 0 | 0 |
node-utils
| 556 |
2025-05-16T15:31:13.039285
|
Apache-2.0
| false |
224ef3624bd615019a4c8bb610ada1b8
|
\n\n
|
.venv\Lib\site-packages\anyio\__pycache__\from_thread.cpython-313.pyc
|
from_thread.cpython-313.pyc
|
Other
| 23,757 | 0.95 | 0.093985 | 0.008696 |
vue-tools
| 111 |
2025-03-16T15:37:20.252106
|
BSD-3-Clause
| false |
6f2c9f2c8ed7ebb7a4dfc9e58b6bf3a8
|
\n\n
|
.venv\Lib\site-packages\anyio\__pycache__\lowlevel.cpython-313.pyc
|
lowlevel.cpython-313.pyc
|
Other
| 7,004 | 0.8 | 0.072165 | 0.012658 |
awesome-app
| 670 |
2025-02-03T10:40:47.914462
|
Apache-2.0
| false |
154a2a133241c984a1e50ae64e301de2
|
\n\n
|
.venv\Lib\site-packages\anyio\__pycache__\pytest_plugin.cpython-313.pyc
|
pytest_plugin.cpython-313.pyc
|
Other
| 13,728 | 0.95 | 0.078947 | 0 |
awesome-app
| 876 |
2023-11-23T05:36:17.939060
|
Apache-2.0
| true |
bd6c086263c00f0c0b7384375bca1536
|
\n\n
|
.venv\Lib\site-packages\anyio\__pycache__\to_interpreter.cpython-313.pyc
|
to_interpreter.cpython-313.pyc
|
Other
| 9,196 | 0.95 | 0.073394 | 0.020619 |
awesome-app
| 288 |
2024-01-17T05:12:23.866575
|
BSD-3-Clause
| false |
e14fd3e7fd37eaeab93dd7785b1656b4
|
\n\n
|
.venv\Lib\site-packages\anyio\__pycache__\to_process.cpython-313.pyc
|
to_process.cpython-313.pyc
|
Other
| 11,972 | 0.95 | 0.04918 | 0.026786 |
node-utils
| 105 |
2023-11-05T03:28:57.486641
|
BSD-3-Clause
| false |
65d47b8bda93522fe12ae92301259fa7
|
\n\n
|
.venv\Lib\site-packages\anyio\__pycache__\to_thread.cpython-313.pyc
|
to_thread.cpython-313.pyc
|
Other
| 2,846 | 0.95 | 0.166667 | 0.027778 |
python-kit
| 706 |
2023-11-04T01:34:51.539860
|
GPL-3.0
| false |
5dde93131b0196fdb5c5cac03616ce07
|
\n\n
|
.venv\Lib\site-packages\anyio\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 3,597 | 0.8 | 0 | 0 |
vue-tools
| 271 |
2025-01-06T01:01:17.151453
|
MIT
| false |
dd806783152a9db1e110079f2e82a05c
|
[pytest11]\nanyio = anyio.pytest_plugin\n
|
.venv\Lib\site-packages\anyio-4.9.0.dist-info\entry_points.txt
|
entry_points.txt
|
Other
| 39 | 0.5 | 0 | 0 |
react-lib
| 162 |
2023-10-24T19:38:09.307467
|
MIT
| false |
b5e037147bc565eba99a2f4142dd7990
|
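The entry point above registers anyio's pytest plugin, which lets async test functions run without an explicit event loop. A hypothetical test module using it (assumes pytest and anyio are installed):

import pytest
import anyio


@pytest.mark.anyio
async def test_sleep_completes() -> None:
    # The backend is chosen by the plugin's anyio_backend fixture
    # (asyncio by default).
    await anyio.sleep(0)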
pip\n
|
.venv\Lib\site-packages\anyio-4.9.0.dist-info\INSTALLER
|
INSTALLER
|
Other
| 4 | 0.5 | 0 | 0 |
node-utils
| 61 |
2025-02-05T01:46:53.258772
|
BSD-3-Clause
| false |
365c9bfeb7d89244f2ce01c1de44cb85
|
The MIT License (MIT)\n\nCopyright (c) 2018 Alex Grönholm\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the "Software"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n
|
.venv\Lib\site-packages\anyio-4.9.0.dist-info\LICENSE
|
LICENSE
|
Other
| 1,081 | 0.7 | 0 | 0 |
node-utils
| 463 |
2025-04-07T12:42:29.461243
|
Apache-2.0
| false |
c0a769411d2af7894099e8ff75058c9f
|
Metadata-Version: 2.2\nName: anyio\nVersion: 4.9.0\nSummary: High level compatibility layer for multiple asynchronous event loop implementations\nAuthor-email: Alex Grönholm <[email protected]>\nLicense: MIT\nProject-URL: Documentation, https://anyio.readthedocs.io/en/latest/\nProject-URL: Changelog, https://anyio.readthedocs.io/en/stable/versionhistory.html\nProject-URL: Source code, https://github.com/agronholm/anyio\nProject-URL: Issue tracker, https://github.com/agronholm/anyio/issues\nClassifier: Development Status :: 5 - Production/Stable\nClassifier: Intended Audience :: Developers\nClassifier: License :: OSI Approved :: MIT License\nClassifier: Framework :: AnyIO\nClassifier: Typing :: Typed\nClassifier: Programming Language :: Python\nClassifier: Programming Language :: Python :: 3\nClassifier: Programming Language :: Python :: 3.9\nClassifier: Programming Language :: Python :: 3.10\nClassifier: Programming Language :: Python :: 3.11\nClassifier: Programming Language :: Python :: 3.12\nClassifier: Programming Language :: Python :: 3.13\nRequires-Python: >=3.9\nDescription-Content-Type: text/x-rst\nLicense-File: LICENSE\nRequires-Dist: exceptiongroup>=1.0.2; python_version < "3.11"\nRequires-Dist: idna>=2.8\nRequires-Dist: sniffio>=1.1\nRequires-Dist: typing_extensions>=4.5; python_version < "3.13"\nProvides-Extra: trio\nRequires-Dist: trio>=0.26.1; extra == "trio"\nProvides-Extra: test\nRequires-Dist: anyio[trio]; extra == "test"\nRequires-Dist: blockbuster>=1.5.23; extra == "test"\nRequires-Dist: coverage[toml]>=7; extra == "test"\nRequires-Dist: exceptiongroup>=1.2.0; extra == "test"\nRequires-Dist: hypothesis>=4.0; extra == "test"\nRequires-Dist: psutil>=5.9; extra == "test"\nRequires-Dist: pytest>=7.0; extra == "test"\nRequires-Dist: trustme; extra == "test"\nRequires-Dist: truststore>=0.9.1; python_version >= "3.10" and extra == "test"\nRequires-Dist: uvloop>=0.21; (platform_python_implementation == "CPython" and platform_system != "Windows" and python_version < "3.14") and extra == "test"\nProvides-Extra: doc\nRequires-Dist: packaging; extra == "doc"\nRequires-Dist: Sphinx~=8.2; extra == "doc"\nRequires-Dist: sphinx_rtd_theme; extra == "doc"\nRequires-Dist: sphinx-autodoc-typehints>=1.2.0; extra == "doc"\n\n.. image:: https://github.com/agronholm/anyio/actions/workflows/test.yml/badge.svg\n :target: https://github.com/agronholm/anyio/actions/workflows/test.yml\n :alt: Build Status\n.. image:: https://coveralls.io/repos/github/agronholm/anyio/badge.svg?branch=master\n :target: https://coveralls.io/github/agronholm/anyio?branch=master\n :alt: Code Coverage\n.. image:: https://readthedocs.org/projects/anyio/badge/?version=latest\n :target: https://anyio.readthedocs.io/en/latest/?badge=latest\n :alt: Documentation\n.. image:: https://badges.gitter.im/gitterHQ/gitter.svg\n :target: https://gitter.im/python-trio/AnyIO\n :alt: Gitter chat\n\nAnyIO is an asynchronous networking and concurrency library that works on top of either asyncio_ or\ntrio_. It implements trio-like `structured concurrency`_ (SC) on top of asyncio and works in harmony\nwith the native SC of trio itself.\n\nApplications and libraries written against AnyIO's API will run unmodified on either asyncio_ or\ntrio_. AnyIO can also be adopted into a library or application incrementally – bit by bit, no full\nrefactoring necessary. 
It will blend in with the native libraries of your chosen backend.\n\nDocumentation\n-------------\n\nView full documentation at: https://anyio.readthedocs.io/\n\nFeatures\n--------\n\nAnyIO offers the following functionality:\n\n* Task groups (nurseries_ in trio terminology)\n* High-level networking (TCP, UDP and UNIX sockets)\n\n * `Happy eyeballs`_ algorithm for TCP connections (more robust than that of asyncio on Python\n 3.8)\n * async/await style UDP sockets (unlike asyncio where you still have to use Transports and\n Protocols)\n\n* A versatile API for byte streams and object streams\n* Inter-task synchronization and communication (locks, conditions, events, semaphores, object\n streams)\n* Worker threads\n* Subprocesses\n* Asynchronous file I/O (using worker threads)\n* Signal handling\n\nAnyIO also comes with its own pytest_ plugin which also supports asynchronous fixtures.\nIt even works with the popular Hypothesis_ library.\n\n.. _asyncio: https://docs.python.org/3/library/asyncio.html\n.. _trio: https://github.com/python-trio/trio\n.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency\n.. _nurseries: https://trio.readthedocs.io/en/stable/reference-core.html#nurseries-and-spawning\n.. _Happy eyeballs: https://en.wikipedia.org/wiki/Happy_Eyeballs\n.. _pytest: https://docs.pytest.org/en/latest/\n.. _Hypothesis: https://hypothesis.works/\n
|
.venv\Lib\site-packages\anyio-4.9.0.dist-info\METADATA
|
METADATA
|
Other
| 4,682 | 0.95 | 0.028571 | 0.107527 |
awesome-app
| 165 |
2024-06-14T12:49:51.855365
|
GPL-3.0
| false |
2258c9b8686781cf21bb9bfce69b289a
|
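A minimal sketch of the task-group feature the README above highlights (assumes anyio is installed; the same code runs unchanged on the asyncio and trio backends):

import anyio


async def say(text: str, delay: float) -> None:
    await anyio.sleep(delay)
    print(text)


async def main() -> None:
    async with anyio.create_task_group() as tg:  # a trio-style "nursery"
        tg.start_soon(say, "world", 0.2)
        tg.start_soon(say, "hello", 0.1)
    # the task group exits only after both children have finished


anyio.run(main)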
anyio-4.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4\nanyio-4.9.0.dist-info/LICENSE,sha256=U2GsncWPLvX9LpsJxoKXwX8ElQkJu8gCO9uC6s8iwrA,1081\nanyio-4.9.0.dist-info/METADATA,sha256=vvkWPXXTbrpTCFK7zdcYwQcSQhx6Q4qITM9t_PEQCrY,4682\nanyio-4.9.0.dist-info/RECORD,,\nanyio-4.9.0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91\nanyio-4.9.0.dist-info/entry_points.txt,sha256=_d6Yu6uiaZmNe0CydowirE9Cmg7zUL2g08tQpoS3Qvc,39\nanyio-4.9.0.dist-info/top_level.txt,sha256=QglSMiWX8_5dpoVAEIHdEYzvqFMdSYWmCj6tYw2ITkQ,6\nanyio/__init__.py,sha256=t8bZuNXa5ncwXBaNKbv48BDgZt48RT_zCEtrnPmjNU8,4993\nanyio/__pycache__/__init__.cpython-313.pyc,,\nanyio/__pycache__/from_thread.cpython-313.pyc,,\nanyio/__pycache__/lowlevel.cpython-313.pyc,,\nanyio/__pycache__/pytest_plugin.cpython-313.pyc,,\nanyio/__pycache__/to_interpreter.cpython-313.pyc,,\nanyio/__pycache__/to_process.cpython-313.pyc,,\nanyio/__pycache__/to_thread.cpython-313.pyc,,\nanyio/_backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\nanyio/_backends/__pycache__/__init__.cpython-313.pyc,,\nanyio/_backends/__pycache__/_asyncio.cpython-313.pyc,,\nanyio/_backends/__pycache__/_trio.cpython-313.pyc,,\nanyio/_backends/_asyncio.py,sha256=AT1oaTfCE-9YFxooMlvld2yDqY5U2A-ANMcBDh9eRfI,93455\nanyio/_backends/_trio.py,sha256=HVfDqRGQ7Xj3JfTcYdgzmC7pZEplqU4NOO5kxNNSZnk,40429\nanyio/_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\nanyio/_core/__pycache__/__init__.cpython-313.pyc,,\nanyio/_core/__pycache__/_asyncio_selector_thread.cpython-313.pyc,,\nanyio/_core/__pycache__/_eventloop.cpython-313.pyc,,\nanyio/_core/__pycache__/_exceptions.cpython-313.pyc,,\nanyio/_core/__pycache__/_fileio.cpython-313.pyc,,\nanyio/_core/__pycache__/_resources.cpython-313.pyc,,\nanyio/_core/__pycache__/_signals.cpython-313.pyc,,\nanyio/_core/__pycache__/_sockets.cpython-313.pyc,,\nanyio/_core/__pycache__/_streams.cpython-313.pyc,,\nanyio/_core/__pycache__/_subprocesses.cpython-313.pyc,,\nanyio/_core/__pycache__/_synchronization.cpython-313.pyc,,\nanyio/_core/__pycache__/_tasks.cpython-313.pyc,,\nanyio/_core/__pycache__/_tempfile.cpython-313.pyc,,\nanyio/_core/__pycache__/_testing.cpython-313.pyc,,\nanyio/_core/__pycache__/_typedattr.cpython-313.pyc,,\nanyio/_core/_asyncio_selector_thread.py,sha256=2PdxFM3cs02Kp6BSppbvmRT7q7asreTW5FgBxEsflBo,5626\nanyio/_core/_eventloop.py,sha256=t_tAwBFPjF8jrZGjlJ6bbYy6KA3bjsbZxV9mvh9t1i0,4695\nanyio/_core/_exceptions.py,sha256=RlPRlwastdmfDPoskdXNO6SI8_l3fclA2wtW6cokU9I,3503\nanyio/_core/_fileio.py,sha256=qFZhkLIz0cGXluvih_vcPUTucgq8UFVgsTCtYbijZIg,23340\nanyio/_core/_resources.py,sha256=NbmU5O5UX3xEyACnkmYX28Fmwdl-f-ny0tHym26e0w0,435\nanyio/_core/_signals.py,sha256=vulT1M1xdLYtAR-eY5TamIgaf1WTlOwOrMGwswlTTr8,905\nanyio/_core/_sockets.py,sha256=5Okc_UThGDEN9KCnsIhqWPRHBNuSy6b4NmG1i51TVF4,27150\nanyio/_core/_streams.py,sha256=OnaKgoDD-FcMSwLvkoAUGP51sG2ZdRvMpxt9q2w1gYA,1804\nanyio/_core/_subprocesses.py,sha256=EXm5igL7dj55iYkPlbYVAqtbqxJxjU-6OndSTIx9SRg,8047\nanyio/_core/_synchronization.py,sha256=DwUh8Tl6cG_UMVC_GyzPoC_U9BpfDfjMl9SINSxcZN4,20320\nanyio/_core/_tasks.py,sha256=f3CuWwo06cCZ6jaOv-JHFKWkgpgf2cvaF25Oh4augMA,4757\nanyio/_core/_tempfile.py,sha256=s-_ucacXbxBH5Bo5eo65lN0lPwZQd5B8yNN_9nARpCM,19696\nanyio/_core/_testing.py,sha256=YUGwA5cgFFbUTv4WFd7cv_BSVr4ryTtPp8owQA3JdWE,2118\nanyio/_core/_typedattr.py,sha256=P4ozZikn3-DbpoYcvyghS_FOYAgbmUxeoU8-L_07pZM,2508\nanyio/abc/__init__.py,sha256=c2OQbTCS_fQowviMXanLPh8m29ccwkXmpDr7uyNZYOo,2652\nanyio/abc/__pycache_
_/__init__.cpython-313.pyc,,\nanyio/abc/__pycache__/_eventloop.cpython-313.pyc,,\nanyio/abc/__pycache__/_resources.cpython-313.pyc,,\nanyio/abc/__pycache__/_sockets.cpython-313.pyc,,\nanyio/abc/__pycache__/_streams.cpython-313.pyc,,\nanyio/abc/__pycache__/_subprocesses.cpython-313.pyc,,\nanyio/abc/__pycache__/_tasks.cpython-313.pyc,,\nanyio/abc/__pycache__/_testing.cpython-313.pyc,,\nanyio/abc/_eventloop.py,sha256=UmL8DZCvQTgxzmyBZcGm9kWj9VQY8BMWueLh5S8yWN4,9682\nanyio/abc/_resources.py,sha256=DrYvkNN1hH6Uvv5_5uKySvDsnknGVDe8FCKfko0VtN8,783\nanyio/abc/_sockets.py,sha256=KhWtJxan8jpBXKwPaFeQzI4iRXdFaOIn0HXtDZnaO7U,6262\nanyio/abc/_streams.py,sha256=He_JpkAW2g5veOzcUq0XsRC2nId_i35L-d8cs7Uj1ZQ,6598\nanyio/abc/_subprocesses.py,sha256=cumAPJTktOQtw63IqG0lDpyZqu_l1EElvQHMiwJgL08,2067\nanyio/abc/_tasks.py,sha256=yJWbMwowvqjlAX4oJ3l9Is1w-zwynr2lX1Z02AWJqsY,3080\nanyio/abc/_testing.py,sha256=tBJUzkSfOXJw23fe8qSJ03kJlShOYjjaEyFB6k6MYT8,1821\nanyio/from_thread.py,sha256=MbXHZpgM9wgsRkbGhMNMomEGYj7Y_QYq6a5BZ3c5Ev8,17478\nanyio/lowlevel.py,sha256=nkgmW--SdxGVp0cmLUYazjkigveRm5HY7-gW8Bpp9oY,4169\nanyio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\nanyio/pytest_plugin.py,sha256=qXNwk9Pa7hPQKWocgLl9qijqKGMkGzdH2wJa-jPkGUM,9375\nanyio/streams/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\nanyio/streams/__pycache__/__init__.cpython-313.pyc,,\nanyio/streams/__pycache__/buffered.cpython-313.pyc,,\nanyio/streams/__pycache__/file.cpython-313.pyc,,\nanyio/streams/__pycache__/memory.cpython-313.pyc,,\nanyio/streams/__pycache__/stapled.cpython-313.pyc,,\nanyio/streams/__pycache__/text.cpython-313.pyc,,\nanyio/streams/__pycache__/tls.cpython-313.pyc,,\nanyio/streams/buffered.py,sha256=UCldKC168YuLvT7n3HtNPnQ2iWAMSTYQWbZvzLwMwkM,4500\nanyio/streams/file.py,sha256=6uoTNb5KbMoj-6gS3_xrrL8uZN8Q4iIvOS1WtGyFfKw,4383\nanyio/streams/memory.py,sha256=o1OVVx0OooteTTe2GytJreum93Ucuw5s4cAsr3X0-Ag,10560\nanyio/streams/stapled.py,sha256=U09pCrmOw9kkNhe6tKopsm1QIMT1lFTFvtb-A7SIe4k,4302\nanyio/streams/text.py,sha256=6x8w8xlfCZKTUWQoJiMPoMhSSJFUBRKgoBNSBtbd9yg,5094\nanyio/streams/tls.py,sha256=HxzpVmUgo8SUSIBass_lvef1pAI1uRSrnysM3iEGzl4,13199\nanyio/to_interpreter.py,sha256=UhuNCIucCRN7ZtyJg35Mlamzs1JpgDvK4xnL4TDWrAo,6527\nanyio/to_process.py,sha256=ZvruelRM-HNmqDaql4sdNODg2QD_uSlwSCxnV4OhsfQ,9595\nanyio/to_thread.py,sha256=WM2JQ2MbVsd5D5CM08bQiTwzZIvpsGjfH1Fy247KoDQ,2396\n
|
.venv\Lib\site-packages\anyio-4.9.0.dist-info\RECORD
|
RECORD
|
Other
| 5,964 | 0.7 | 0 | 0 |
react-lib
| 43 |
2024-04-01T19:31:51.275817
|
MIT
| false |
5d0c8e9aed03c334a288cc897afb9d43
|
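Note on the RECORD rows above: each entry is `path,sha256=<digest>,size`, where the digest is an unpadded urlsafe-Base64 SHA-256 of the installed file (entries such as `anyio-4.9.0.dist-info/RECORD,,` and the `.pyc` caches deliberately carry neither hash nor size). A minimal sketch of checking one entry against disk; the helper name and the site-packages path are illustrative, not part of any package:

```python
# Illustrative sketch only: verify a single wheel RECORD entry.
import base64
import hashlib
from pathlib import Path


def verify_record_entry(site_packages: Path, line: str) -> bool:
    """Return True if the file named in a RECORD line matches its digest."""
    path, digest, _size = line.rsplit(",", 2)
    if not digest:  # e.g. "anyio-4.9.0.dist-info/RECORD,," has no hash
        return True
    algo, _, expected = digest.partition("=")
    if algo != "sha256":
        raise ValueError(f"unexpected hash algorithm: {algo}")
    raw = (site_packages / path).read_bytes()
    actual = base64.urlsafe_b64encode(hashlib.sha256(raw).digest()).rstrip(b"=")
    return actual.decode("ascii") == expected


# The digest 47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU recurs above for every
# zero-byte file (py.typed, empty __init__.py): it is SHA-256 of empty input.
# verify_record_entry(Path(".venv/Lib/site-packages"),
#     "anyio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0")
```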
anyio\n
|
.venv\Lib\site-packages\anyio-4.9.0.dist-info\top_level.txt
|
top_level.txt
|
Other
| 6 | 0.5 | 0 | 0 |
python-kit
| 510 |
2023-10-10T08:49:32.095908
|
GPL-3.0
| false |
ed5a97064b32e2de132a87802c6ef7f7
|
Wheel-Version: 1.0\nGenerator: setuptools (76.0.0)\nRoot-Is-Purelib: true\nTag: py3-none-any\n\n
|
.venv\Lib\site-packages\anyio-4.9.0.dist-info\WHEEL
|
WHEEL
|
Other
| 91 | 0.5 | 0 | 0 |
node-utils
| 548 |
2023-08-20T12:14:31.941233
|
MIT
| false |
1b247ebaec32fe9d45aeaf7545cfd008
|
# SPDX-License-Identifier: MIT\n\nfrom __future__ import annotations\n\n\nclass Argon2Error(Exception):\n """\n Superclass of all argon2 exceptions.\n\n Never thrown directly.\n """\n\n\nclass VerificationError(Argon2Error):\n """\n Verification failed.\n\n You can find the original error message from Argon2 in ``args[0]``.\n """\n\n\nclass VerifyMismatchError(VerificationError):\n """\n The secret does not match the hash.\n\n Subclass of :exc:`argon2.exceptions.VerificationError`.\n\n .. versionadded:: 16.1.0\n """\n\n\nclass HashingError(Argon2Error):\n """\n Raised if hashing failed.\n\n You can find the original error message from Argon2 in ``args[0]``.\n """\n\n\nclass InvalidHashError(ValueError):\n """\n Raised if the hash is invalid before passing it to Argon2.\n\n .. versionadded:: 23.1.0\n As a replacement for :exc:`argon2.exceptions.InvalidHash`.\n """\n\n\nclass UnsupportedParametersError(ValueError):\n """\n Raised if the current platform does not support the parameters.\n\n For example, in WebAssembly parallelism must be set to 1.\n\n .. versionadded:: 25.1.0\n """\n\n\nInvalidHash = InvalidHashError\n"""\nDeprecated alias for :class:`InvalidHashError`.\n\n.. versionadded:: 18.2.0\n.. deprecated:: 23.1.0\n Use :exc:`argon2.exceptions.InvalidHashError` instead.\n"""\n
|
.venv\Lib\site-packages\argon2\exceptions.py
|
exceptions.py
|
Python
| 1,322 | 0.95 | 0.181818 | 0.02381 |
awesome-app
| 245 |
2023-11-10T04:25:51.438064
|
Apache-2.0
| false |
c83de020ec00b8748c00f43f1d221b13
|
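The `exceptions.py` module above defines a hierarchy that lets callers separate "wrong password" from "malformed hash". A short sketch of the intended handling, using the high-level `argon2.PasswordHasher` API that appears later in this dump:

```python
# Sketch of typical error handling against the hierarchy defined above.
from argon2 import PasswordHasher
from argon2.exceptions import InvalidHashError, VerifyMismatchError

ph = PasswordHasher()
stored = ph.hash("correct horse battery staple")

try:
    ph.verify(stored, "Tr0ub4dor&3")
except VerifyMismatchError:
    print("password rejected")         # the secret does not match the hash
except InvalidHashError:
    print("stored hash is malformed")  # bad input, rejected before hashing
```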
# SPDX-License-Identifier: MIT\n\n"""\nThis module offers access to standardized parameters that you can load using\n:meth:`argon2.PasswordHasher.from_parameters()`. See the `source code\n<https://github.com/hynek/argon2-cffi/blob/main/src/argon2/profiles.py>`_ for\nconcrete values and :doc:`parameters` for more information.\n\n.. versionadded:: 21.2.0\n"""\n\nfrom __future__ import annotations\n\nimport dataclasses\n\nfrom ._utils import Parameters, _is_wasm\nfrom .low_level import Type\n\n\ndef get_default_parameters() -> Parameters:\n """\n Create default parameters for current platform.\n\n Returns:\n Default, compatible, parameters for current platform.\n\n .. versionadded:: 25.1.0\n """\n params = RFC_9106_LOW_MEMORY\n\n if _is_wasm():\n params = dataclasses.replace(params, parallelism=1)\n\n return params\n\n\n# FIRST RECOMMENDED option per RFC 9106.\nRFC_9106_HIGH_MEMORY = Parameters(\n type=Type.ID,\n version=19,\n salt_len=16,\n hash_len=32,\n time_cost=1,\n memory_cost=2097152, # 2 GiB\n parallelism=4,\n)\n\n# SECOND RECOMMENDED option per RFC 9106.\nRFC_9106_LOW_MEMORY = Parameters(\n type=Type.ID,\n version=19,\n salt_len=16,\n hash_len=32,\n time_cost=3,\n memory_cost=65536, # 64 MiB\n parallelism=4,\n)\n\n# The pre-RFC defaults in argon2-cffi 18.2.0 - 21.1.0.\nPRE_21_2 = Parameters(\n type=Type.ID,\n version=19,\n salt_len=16,\n hash_len=16,\n time_cost=2,\n memory_cost=102400, # 100 MiB\n parallelism=8,\n)\n\n# Only for testing!\nCHEAPEST = Parameters(\n type=Type.ID,\n version=19,\n salt_len=8,\n hash_len=4,\n time_cost=1,\n memory_cost=8,\n parallelism=1,\n)\n
|
.venv\Lib\site-packages\argon2\profiles.py
|
profiles.py
|
Python
| 1,650 | 0.95 | 0.088608 | 0.079365 |
node-utils
| 722 |
2025-03-28T07:12:08.365156
|
MIT
| false |
f40c407d29501be03ab81897e8c053f2
|
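As the module docstring says, these profiles are meant to be loaded with `argon2.PasswordHasher.from_parameters()`. A brief sketch; the password literal is a placeholder:

```python
# Sketch: selecting a standardized profile instead of hand-tuning parameters.
from argon2 import PasswordHasher, profiles

# RFC 9106's second recommended option: 64 MiB memory, time_cost=3, parallelism=4.
ph = PasswordHasher.from_parameters(profiles.RFC_9106_LOW_MEMORY)
encoded = ph.hash("placeholder-password")

# CHEAPEST exists only to keep test suites fast; never use it in production.
test_ph = PasswordHasher.from_parameters(profiles.CHEAPEST)
```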
# SPDX-License-Identifier: MIT\n\n"""\nLegacy mid-level functions.\n"""\n\nfrom __future__ import annotations\n\nimport os\nimport warnings\n\nfrom typing import Literal\n\nfrom ._password_hasher import (\n DEFAULT_HASH_LENGTH,\n DEFAULT_MEMORY_COST,\n DEFAULT_PARALLELISM,\n DEFAULT_RANDOM_SALT_LENGTH,\n DEFAULT_TIME_COST,\n)\nfrom .low_level import Type, hash_secret, hash_secret_raw, verify_secret\n\n\n_INSTEAD = " is deprecated, use argon2.PasswordHasher instead"\n\n\ndef hash_password(\n password: bytes,\n salt: bytes | None = None,\n time_cost: int = DEFAULT_TIME_COST,\n memory_cost: int = DEFAULT_MEMORY_COST,\n parallelism: int = DEFAULT_PARALLELISM,\n hash_len: int = DEFAULT_HASH_LENGTH,\n type: Type = Type.I,\n) -> bytes:\n """\n Legacy alias for :func:`argon2.low_level.hash_secret` with default\n parameters.\n\n .. deprecated:: 16.0.0\n Use :class:`argon2.PasswordHasher` for passwords.\n """\n warnings.warn(\n "argon2.hash_password" + _INSTEAD, DeprecationWarning, stacklevel=2\n )\n if salt is None:\n salt = os.urandom(DEFAULT_RANDOM_SALT_LENGTH)\n return hash_secret(\n password, salt, time_cost, memory_cost, parallelism, hash_len, type\n )\n\n\ndef hash_password_raw(\n password: bytes,\n salt: bytes | None = None,\n time_cost: int = DEFAULT_TIME_COST,\n memory_cost: int = DEFAULT_MEMORY_COST,\n parallelism: int = DEFAULT_PARALLELISM,\n hash_len: int = DEFAULT_HASH_LENGTH,\n type: Type = Type.I,\n) -> bytes:\n """\n Legacy alias for :func:`argon2.low_level.hash_secret_raw` with default\n parameters.\n\n .. deprecated:: 16.0.0\n Use :class:`argon2.PasswordHasher` for passwords.\n """\n warnings.warn(\n "argon2.hash_password_raw" + _INSTEAD, DeprecationWarning, stacklevel=2\n )\n if salt is None:\n salt = os.urandom(DEFAULT_RANDOM_SALT_LENGTH)\n return hash_secret_raw(\n password, salt, time_cost, memory_cost, parallelism, hash_len, type\n )\n\n\ndef verify_password(\n hash: bytes, password: bytes, type: Type = Type.I\n) -> Literal[True]:\n """\n Legacy alias for :func:`argon2.low_level.verify_secret` with default\n parameters.\n\n .. deprecated:: 16.0.0\n Use :class:`argon2.PasswordHasher` for passwords.\n """\n warnings.warn(\n "argon2.verify_password" + _INSTEAD, DeprecationWarning, stacklevel=2\n )\n return verify_secret(hash, password, type)\n
|
.venv\Lib\site-packages\argon2\_legacy.py
|
_legacy.py
|
Python
| 2,416 | 0.95 | 0.152174 | 0.013158 |
awesome-app
| 343 |
2024-09-04T09:20:47.435462
|
MIT
| false |
7149eb4fd43b1337dc26463a7dde6901
|
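Every wrapper above warns that `argon2.PasswordHasher` is the replacement, and the mapping is mechanical. A sketch of the migration; note the legacy helpers default to `Type.I`, while `PasswordHasher` produces Argon2id hashes:

```python
# Migration sketch for the deprecated helpers defined above.
from argon2 import PasswordHasher

# Before (deprecated since 16.0.0, emits DeprecationWarning):
#     hashed = argon2.hash_password(b"secret")
#     argon2.verify_password(hashed, b"secret")

# After:
ph = PasswordHasher()          # Argon2id with the library's defaults
hashed = ph.hash("secret")
ph.verify(hashed, "secret")    # raises VerifyMismatchError on failure
```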
# SPDX-License-Identifier: MIT\n\nfrom __future__ import annotations\n\nimport platform\nimport sys\n\nfrom dataclasses import dataclass\nfrom typing import Any\n\nfrom .exceptions import InvalidHashError, UnsupportedParametersError\nfrom .low_level import Type\n\n\nNoneType = type(None)\n\n\ndef _check_types(**kw: Any) -> str | None:\n """\n Check each ``name: (value, types)`` in *kw*.\n\n Returns a human-readable string of all violations or `None``.\n """\n errors = []\n for name, (value, types) in kw.items():\n if not isinstance(value, types):\n if isinstance(types, tuple):\n types = ", or ".join(t.__name__ for t in types)\n else:\n types = types.__name__\n errors.append(\n f"'{name}' must be a {types} (got {type(value).__name__})"\n )\n\n if errors != []:\n return ", ".join(errors) + "."\n\n return None\n\n\ndef _is_wasm() -> bool:\n return sys.platform == "emscripten" or platform.machine() in [\n "wasm32",\n "wasm64",\n ]\n\n\ndef _decoded_str_len(length: int) -> int:\n """\n Compute how long an encoded string of length *l* becomes.\n """\n rem = length % 4\n\n if rem == 3:\n last_group_len = 2\n elif rem == 2:\n last_group_len = 1\n else:\n last_group_len = 0\n\n return length // 4 * 3 + last_group_len\n\n\n@dataclass\nclass Parameters:\n """\n Argon2 hash parameters.\n\n See :doc:`parameters` on how to pick them.\n\n Attributes:\n type: Hash type.\n\n version: Argon2 version.\n\n salt_len: Length of the salt in bytes.\n\n hash_len: Length of the hash in bytes.\n\n time_cost: Time cost in iterations.\n\n memory_cost: Memory cost in kibibytes.\n\n parallelism: Number of parallel threads.\n\n .. versionadded:: 18.2.0\n """\n\n type: Type\n version: int\n salt_len: int\n hash_len: int\n time_cost: int\n memory_cost: int\n parallelism: int\n\n __slots__ = (\n "hash_len",\n "memory_cost",\n "parallelism",\n "salt_len",\n "time_cost",\n "type",\n "version",\n )\n\n\n_NAME_TO_TYPE = {"argon2id": Type.ID, "argon2i": Type.I, "argon2d": Type.D}\n_REQUIRED_KEYS = sorted(("v", "m", "t", "p"))\n\n\ndef extract_parameters(hash: str) -> Parameters:\n """\n Extract parameters from an encoded *hash*.\n\n Args:\n hash: An encoded Argon2 hash string.\n\n Returns:\n The parameters used to create the hash.\n\n .. versionadded:: 18.2.0\n """\n parts = hash.split("$")\n\n # Backwards compatibility for Argon v1.2 hashes\n if len(parts) == 5:\n parts.insert(2, "v=18")\n\n if len(parts) != 6:\n raise InvalidHashError\n\n if parts[0]:\n raise InvalidHashError\n\n try:\n type = _NAME_TO_TYPE[parts[1]]\n\n kvs = {\n k: int(v)\n for k, v in (\n s.split("=") for s in [parts[2], *parts[3].split(",")]\n )\n }\n except Exception: # noqa: BLE001\n raise InvalidHashError from None\n\n if sorted(kvs.keys()) != _REQUIRED_KEYS:\n raise InvalidHashError\n\n return Parameters(\n type=type,\n salt_len=_decoded_str_len(len(parts[4])),\n hash_len=_decoded_str_len(len(parts[5])),\n version=kvs["v"],\n time_cost=kvs["t"],\n memory_cost=kvs["m"],\n parallelism=kvs["p"],\n )\n\n\ndef validate_params_for_platform(params: Parameters) -> None:\n """\n Validate *params* against current platform.\n\n Args:\n params: Parameters to be validated\n\n Returns:\n None\n """\n if _is_wasm() and params.parallelism != 1:\n msg = "In WebAssembly environments `parallelism` must be 1."\n raise UnsupportedParametersError(msg)\n
|
.venv\Lib\site-packages\argon2\_utils.py
|
_utils.py
|
Python
| 3,751 | 0.95 | 0.12069 | 0.015873 |
python-kit
| 816 |
2025-06-29T08:40:02.944591
|
BSD-3-Clause
| false |
b0838ac22c991548dc7724a86b0acd71
|
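`extract_parameters()` above parses the standard `$argon2id$v=19$m=...,t=...,p=...$<salt>$<hash>` encoding, and `_decoded_str_len()` supplies the Base64-decoded lengths of the salt and hash segments. A sketch using the sample hash from the package README quoted later in this dump:

```python
# Sketch: round-tripping an encoded hash through extract_parameters().
from argon2 import extract_parameters

params = extract_parameters(
    "$argon2id$v=19$m=65536,t=3,p=4"
    "$MIIRqgvgQbgj220jfp0MPA$YfwJSVjtjSU0zzV/P3S9nnQ/USre2wvJMjfCIjrTQbg"
)
print(params.memory_cost)  # 65536 (kibibytes)
print(params.time_cost)    # 3
print(params.salt_len)     # 16 == _decoded_str_len(22)
print(params.hash_len)     # 32 == _decoded_str_len(43)
```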
# SPDX-License-Identifier: MIT\n\n"""\nArgon2 for Python\n"""\n\nfrom . import exceptions, low_level, profiles\nfrom ._legacy import hash_password, hash_password_raw, verify_password\nfrom ._password_hasher import (\n DEFAULT_HASH_LENGTH,\n DEFAULT_MEMORY_COST,\n DEFAULT_PARALLELISM,\n DEFAULT_RANDOM_SALT_LENGTH,\n DEFAULT_TIME_COST,\n PasswordHasher,\n)\nfrom ._utils import Parameters, extract_parameters\nfrom .low_level import Type\n\n\n__title__ = "argon2-cffi"\n\n__author__ = "Hynek Schlawack"\n__copyright__ = "Copyright (c) 2015 " + __author__\n__license__ = "MIT"\n\n\n__all__ = [\n "DEFAULT_HASH_LENGTH",\n "DEFAULT_MEMORY_COST",\n "DEFAULT_PARALLELISM",\n "DEFAULT_RANDOM_SALT_LENGTH",\n "DEFAULT_TIME_COST",\n "Parameters",\n "PasswordHasher",\n "Type",\n "exceptions",\n "extract_parameters",\n "hash_password",\n "hash_password_raw",\n "low_level",\n "profiles",\n "verify_password",\n]\n\n\ndef __getattr__(name: str) -> str:\n dunder_to_metadata = {\n "__version__": "version",\n "__description__": "summary",\n "__uri__": "",\n "__url__": "",\n "__email__": "",\n }\n if name not in dunder_to_metadata:\n msg = f"module {__name__} has no attribute {name}"\n raise AttributeError(msg)\n\n import warnings\n\n from importlib.metadata import metadata\n\n warnings.warn(\n f"Accessing argon2.{name} is deprecated and will be "\n "removed in a future release. Use importlib.metadata directly "\n "to query for argon2-cffi's packaging metadata.",\n DeprecationWarning,\n stacklevel=2,\n )\n\n meta = metadata("argon2-cffi")\n\n if name in ("__uri__", "__url__"):\n return meta["Project-URL"].split(" ", 1)[-1]\n\n if name == "__email__":\n return meta["Author-email"].split("<", 1)[1].rstrip(">")\n\n return meta[dunder_to_metadata[name]]\n
|
.venv\Lib\site-packages\argon2\__init__.py
|
__init__.py
|
Python
| 1,869 | 0.95 | 0.075949 | 0.015873 |
vue-tools
| 348 |
2024-04-09T14:42:12.222205
|
MIT
| false |
9ffa0280cf1cbd21bd303d1ae5967cf1
|
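The module-level `__getattr__` above is the PEP 562 lazy-attribute pattern: dunder metadata is resolved from `importlib.metadata` on first access, with a `DeprecationWarning` steering callers toward querying packaging metadata directly. A sketch of what triggers it:

```python
# Sketch: observing the deprecation shim defined in argon2/__init__.py above.
import warnings

import argon2

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    version = argon2.__version__   # not a real attribute; routed to __getattr__

print(version)             # e.g. "25.1.0", fetched via importlib.metadata
print(caught[0].category)  # <class 'DeprecationWarning'>

# The non-deprecated equivalent the warning recommends:
from importlib.metadata import version as pkg_version

print(pkg_version("argon2-cffi"))
```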
# SPDX-License-Identifier: MIT\n\nfrom __future__ import annotations\n\nimport argparse\nimport sys\nimport timeit\n\nfrom . import (\n DEFAULT_HASH_LENGTH,\n DEFAULT_MEMORY_COST,\n DEFAULT_PARALLELISM,\n DEFAULT_TIME_COST,\n PasswordHasher,\n profiles,\n)\n\n\ndef main(argv: list[str]) -> None:\n parser = argparse.ArgumentParser(\n description="Benchmark Argon2.",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n "-n", type=int, default=100, help="Number of iterations to measure."\n )\n parser.add_argument(\n "-t", type=int, help="`time_cost`", default=DEFAULT_TIME_COST\n )\n parser.add_argument(\n "-m", type=int, help="`memory_cost`", default=DEFAULT_MEMORY_COST\n )\n parser.add_argument(\n "-p", type=int, help="`parallelism`", default=DEFAULT_PARALLELISM\n )\n parser.add_argument(\n "-l", type=int, help="`hash_length`", default=DEFAULT_HASH_LENGTH\n )\n parser.add_argument(\n "--profile",\n type=str,\n help="A profile from `argon2.profiles. Takes precedence.",\n default=None,\n )\n\n args = parser.parse_args(argv[1:])\n\n password = b"secret"\n if args.profile:\n ph = PasswordHasher.from_parameters(\n getattr(profiles, args.profile.upper())\n )\n else:\n ph = PasswordHasher(\n time_cost=args.t,\n memory_cost=args.m,\n parallelism=args.p,\n hash_len=args.l,\n )\n hash = ph.hash(password)\n\n print(f"Running Argon2id {args.n} times with:")\n\n for name, value, units in [\n ("hash_len", ph.hash_len, "bytes"),\n ("memory_cost", ph.memory_cost, "KiB"),\n ("parallelism", ph.parallelism, "threads"),\n ("time_cost", ph.time_cost, "iterations"),\n ]:\n print(f"{name}: {value} {units}")\n\n print("\nMeasuring...")\n duration = timeit.timeit(\n f"ph.verify({hash!r}, {password!r})",\n setup=f"""\\nfrom argon2 import PasswordHasher\n\nph = PasswordHasher(\n time_cost={args.t!r},\n memory_cost={args.m!r},\n parallelism={args.p!r},\n hash_len={args.l!r},\n)\ngc.enable()""",\n number=args.n,\n )\n print(f"\n{duration / args.n * 1000:.1f}ms per password verification")\n\n\nif __name__ == "__main__": # pragma: no cover\n main(sys.argv)\n
|
.venv\Lib\site-packages\argon2\__main__.py
|
__main__.py
|
Python
| 2,332 | 0.95 | 0.043956 | 0.012821 |
awesome-app
| 218 |
2025-06-07T06:21:18.467703
|
GPL-3.0
| false |
4e9f9bc935df56591d842e07d660b2ce
|
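Because of the `if __name__ == "__main__"` guard, the benchmark above runs as `python -m argon2`; and since `--profile` is resolved with `getattr(profiles, name.upper())`, any constant from `argon2.profiles` can be named in any case. Illustrative invocations (timing output varies by machine):

```console
$ python -m argon2 -n 50 --profile rfc_9106_low_memory
$ python -m argon2 -t 3 -m 65536 -p 4 -l 32
```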
\n\n
|
.venv\Lib\site-packages\argon2\__pycache__\exceptions.cpython-313.pyc
|
exceptions.cpython-313.pyc
|
Other
| 2,088 | 0.95 | 0.108108 | 0 |
awesome-app
| 341 |
2023-07-21T12:59:02.905377
|
BSD-3-Clause
| false |
c85be2c6fe6ac33d6def09ed6dcc7bbe
|
\n\n

|
.venv\Lib\site-packages\argon2\__pycache__\low_level.cpython-313.pyc
|
low_level.cpython-313.pyc
|
Other
| 7,523 | 0.95 | 0.076471 | 0.007407 |
node-utils
| 994 |
2024-04-28T14:38:28.190615
|
MIT
| false |
1b07f0aabf887756907692fde8f48f44
|
\n\n
|
.venv\Lib\site-packages\argon2\__pycache__\profiles.cpython-313.pyc
|
profiles.cpython-313.pyc
|
Other
| 1,652 | 0.8 | 0.102564 | 0 |
react-lib
| 335 |
2024-03-17T07:51:15.445042
|
MIT
| false |
3870bb5cd6c21795ac56ab70c9fcfec0
|
\n\n
|
.venv\Lib\site-packages\argon2\__pycache__\_legacy.cpython-313.pyc
|
_legacy.cpython-313.pyc
|
Other
| 3,017 | 0.95 | 0.163636 | 0 |
awesome-app
| 608 |
2024-11-23T09:49:22.798962
|
BSD-3-Clause
| false |
d516e9fb6efab646ee1966ec5eb1ea80
|
\n\n
|
.venv\Lib\site-packages\argon2\__pycache__\_password_hasher.cpython-313.pyc
|
_password_hasher.cpython-313.pyc
|
Other
| 10,286 | 0.95 | 0.077348 | 0.013699 |
node-utils
| 745 |
2024-03-16T12:11:47.851808
|
BSD-3-Clause
| false |
e2885086017629fb39b9c6676aec54ae
|
\n\n
|
.venv\Lib\site-packages\argon2\__pycache__\_utils.cpython-313.pyc
|
_utils.cpython-313.pyc
|
Other
| 5,576 | 0.8 | 0 | 0 |
react-lib
| 626 |
2024-11-07T05:37:10.545811
|
BSD-3-Clause
| false |
30eafe7a9898749c2b8bd27eed40e9a3
|
\n\n
|
.venv\Lib\site-packages\argon2\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 2,190 | 0.95 | 0.071429 | 0 |
node-utils
| 105 |
2024-07-21T02:29:08.189521
|
GPL-3.0
| false |
2b50a2d8f746db4bd800168b0ba460fa
|
\n\n
|
.venv\Lib\site-packages\argon2\__pycache__\__main__.cpython-313.pyc
|
__main__.cpython-313.pyc
|
Other
| 3,502 | 0.95 | 0 | 0 |
awesome-app
| 970 |
2023-07-20T17:56:06.784635
|
GPL-3.0
| false |
3145e2f925ef18195bf79b54853cc73d
|
pip\n
|
.venv\Lib\site-packages\argon2_cffi-25.1.0.dist-info\INSTALLER
|
INSTALLER
|
Other
| 4 | 0.5 | 0 | 0 |
python-kit
| 920 |
2023-10-26T08:06:20.806853
|
MIT
| false |
365c9bfeb7d89244f2ce01c1de44cb85
|
Metadata-Version: 2.4\nName: argon2-cffi\nVersion: 25.1.0\nSummary: Argon2 for Python\nProject-URL: Documentation, https://argon2-cffi.readthedocs.io/\nProject-URL: Changelog, https://github.com/hynek/argon2-cffi/blob/main/CHANGELOG.md\nProject-URL: GitHub, https://github.com/hynek/argon2-cffi\nProject-URL: Funding, https://github.com/sponsors/hynek\nProject-URL: Tidelift, https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek\nAuthor-email: Hynek Schlawack <[email protected]>\nLicense-Expression: MIT\nLicense-File: LICENSE\nKeywords: hash,hashing,password,security\nClassifier: Development Status :: 5 - Production/Stable\nClassifier: Operating System :: MacOS :: MacOS X\nClassifier: Operating System :: Microsoft :: Windows\nClassifier: Operating System :: POSIX\nClassifier: Programming Language :: Python :: 3.8\nClassifier: Programming Language :: Python :: 3.9\nClassifier: Programming Language :: Python :: 3.10\nClassifier: Programming Language :: Python :: 3.11\nClassifier: Programming Language :: Python :: 3.12\nClassifier: Programming Language :: Python :: 3.13\nClassifier: Programming Language :: Python :: 3.14\nClassifier: Programming Language :: Python :: Implementation :: CPython\nClassifier: Programming Language :: Python :: Implementation :: PyPy\nClassifier: Topic :: Security :: Cryptography\nClassifier: Typing :: Typed\nRequires-Python: >=3.8\nRequires-Dist: argon2-cffi-bindings\nDescription-Content-Type: text/markdown\n\n# *argon2-cffi*: Argon2 for Python\n\n\n[Argon2](https://github.com/p-h-c/phc-winner-argon2) won the [Password Hashing Competition](https://www.password-hashing.net/) and *argon2-cffi* is the simplest way to use it in Python:\n\n```pycon\n>>> from argon2 import PasswordHasher\n>>> ph = PasswordHasher()\n>>> hash = ph.hash("correct horse battery staple")\n>>> hash # doctest: +SKIP\n'$argon2id$v=19$m=65536,t=3,p=4$MIIRqgvgQbgj220jfp0MPA$YfwJSVjtjSU0zzV/P3S9nnQ/USre2wvJMjfCIjrTQbg'\n>>> ph.verify(hash, "correct horse battery staple")\nTrue\n>>> ph.check_needs_rehash(hash)\nFalse\n>>> ph.verify(hash, "Tr0ub4dor&3")\nTraceback (most recent call last):\n ...\nargon2.exceptions.VerifyMismatchError: The password does not match the supplied hash\n\n```\n<!-- end short -->\n\n## Project Links\n\n- [**PyPI**](https://pypi.org/project/argon2-cffi/)\n- [**GitHub**](https://github.com/hynek/argon2-cffi)\n- [**Documentation**](https://argon2-cffi.readthedocs.io/)\n- [**Changelog**](https://github.com/hynek/argon2-cffi/blob/main/CHANGELOG.md)\n- [**Funding**](https://hynek.me/say-thanks/)\n- The low-level Argon2 CFFI bindings are maintained in the separate [*argon2-cffi-bindings*](https://github.com/hynek/argon2-cffi-bindings) project.\n\n## Release Information\n\n### Added\n\n- Official support for Python 3.13 and 3.14.\n No code changes were necessary.\n\n\n### Removed\n\n- Python 3.7 is not supported anymore.\n [#186](https://github.com/hynek/argon2-cffi/pull/186)\n\n\n### Changed\n\n- `argon2.PasswordHasher.check_needs_rehash()` now also accepts bytes like the rest of the API.\n [#174](https://github.com/hynek/argon2-cffi/pull/174)\n\n- Improved parameter compatibility handling for Pyodide / WebAssembly environments.\n [#190](https://github.com/hynek/argon2-cffi/pull/190)\n\n\n---\n\n[Full Changelog →](https://github.com/hynek/argon2-cffi/blob/main/CHANGELOG.md)\n\n\n## Credits\n\n*argon2-cffi* is maintained by [Hynek Schlawack](https://hynek.me/).\n\nThe development is kindly supported by my employer [Variomedia AG](https://www.variomedia.de/), *argon2-cffi* [Tidelift subscribers](https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek), and my amazing [GitHub Sponsors](https://github.com/sponsors/hynek).\n\n\n## *argon2-cffi* for Enterprise\n\nAvailable as part of the [Tidelift Subscription](https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek).\n\nThe maintainers of *argon2-cffi* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open-source packages you use to build your applications.\nSave time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use.\n
|
.venv\Lib\site-packages\argon2_cffi-25.1.0.dist-info\METADATA
|
METADATA
|
Other
| 4,119 | 0.95 | 0.066667 | 0.116883 |
awesome-app
| 452 |
2024-01-07T22:25:52.887072
|
MIT
| false |
061b14092ac544f5200e265016506463
|
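The changelog entry about `check_needs_rehash()` above hints at the intended login flow: verify first, then transparently re-hash credentials whose stored parameters are weaker than the hasher's current ones. A sketch; the function name and storage handling are illustrative:

```python
# Sketch of the verify-then-rehash pattern enabled by check_needs_rehash().
from argon2 import PasswordHasher

ph = PasswordHasher()


def login(stored_hash: str, password: str) -> str:
    """Verify a password and return the hash that should remain on record."""
    ph.verify(stored_hash, password)        # raises VerifyMismatchError on failure
    if ph.check_needs_rehash(stored_hash):  # stored params weaker than ph's?
        return ph.hash(password)            # caller should persist this new hash
    return stored_hash
```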
argon2/__init__.py,sha256=N4S3LvR1y3WstysObwDQsF4yt8NpEot8uGAmy_MZ5fw,1869\nargon2/__main__.py,sha256=bCi1rJkhMBpiDZe3W-MfC2DFH5wYJH4RDmySLcL_Jwg,2332\nargon2/__pycache__/__init__.cpython-313.pyc,,\nargon2/__pycache__/__main__.cpython-313.pyc,,\nargon2/__pycache__/_legacy.cpython-313.pyc,,\nargon2/__pycache__/_password_hasher.cpython-313.pyc,,\nargon2/__pycache__/_utils.cpython-313.pyc,,\nargon2/__pycache__/exceptions.cpython-313.pyc,,\nargon2/__pycache__/low_level.cpython-313.pyc,,\nargon2/__pycache__/profiles.cpython-313.pyc,,\nargon2/_legacy.py,sha256=eIfk7SWuIQQGZz3FY80YW4XQQAnrjzFgeyRFgo2KtCo,2416\nargon2/_password_hasher.py,sha256=pJgSap4C2ey74IUDifbbR_Eeq-GeXvl3nRZc1Qzv3jI,8839\nargon2/_utils.py,sha256=Y3JkroYRioSHXQ5E3Sav7CclqAZkCXKo1cbsJiwqgZk,3751\nargon2/exceptions.py,sha256=sA6k8Tnlqce5uGNNbOQG2PggV91EFd2ZE2dIiB4H6nU,1322\nargon2/low_level.py,sha256=QMSxPwUQPPanGKqJqLExUh4gtqw0u49QPEqjz1nNlYM,6172\nargon2/profiles.py,sha256=nK2-7oYFuGtDxev9g050bFZsh214qwlEX3qejwOQEMY,1650\nargon2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\nargon2_cffi-25.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4\nargon2_cffi-25.1.0.dist-info/METADATA,sha256=6QhRB1toJh-sz0B7KUMPRealsaB1mg1QuNkxAbDeIXk,4119\nargon2_cffi-25.1.0.dist-info/RECORD,,\nargon2_cffi-25.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87\nargon2_cffi-25.1.0.dist-info/licenses/LICENSE,sha256=tpRNOG6HzPSdljLaCDaFpLdiRzPmhqf-KD3S1Cg0HXc,1115\n
|
.venv\Lib\site-packages\argon2_cffi-25.1.0.dist-info\RECORD
|
RECORD
|
Other
| 1,490 | 0.7 | 0 | 0 |
react-lib
| 656 |
2024-05-04T23:36:07.944999
|
Apache-2.0
| false |
23a6dac7400cf0430d62e5cbab9b3fa0
|
Wheel-Version: 1.0\nGenerator: hatchling 1.27.0\nRoot-Is-Purelib: true\nTag: py3-none-any\n
|
.venv\Lib\site-packages\argon2_cffi-25.1.0.dist-info\WHEEL
|
WHEEL
|
Other
| 87 | 0.5 | 0 | 0 |
react-lib
| 822 |
2024-03-23T06:53:39.233905
|
MIT
| false |
e2fcb0ad9ea59332c808928b4b439e7a
|
The MIT License (MIT)\n\nCopyright (c) 2015 Hynek Schlawack and the argon2-cffi contributors\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n
|
.venv\Lib\site-packages\argon2_cffi-25.1.0.dist-info\licenses\LICENSE
|
LICENSE
|
Other
| 1,115 | 0.7 | 0 | 0 |
vue-tools
| 507 |
2024-03-15T15:23:17.892307
|
Apache-2.0
| false |
e91e96ef55f45fe9caf7fc3e73672c4b
|
pip\n
|
.venv\Lib\site-packages\argon2_cffi_bindings-21.2.0.dist-info\INSTALLER
|
INSTALLER
|
Other
| 4 | 0.5 | 0 | 0 |
vue-tools
| 220 |
2025-06-09T16:35:26.404071
|
MIT
| false |
365c9bfeb7d89244f2ce01c1de44cb85
|
The MIT License (MIT)\n\nCopyright (c) 2021 Hynek Schlawack\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n
|
.venv\Lib\site-packages\argon2_cffi_bindings-21.2.0.dist-info\LICENSE
|
LICENSE
|
Other
| 1,103 | 0.7 | 0 | 0 |
vue-tools
| 713 |
2024-09-01T11:28:31.872469
|
MIT
| false |
4642dfcbd13c1cc49e9f99df9de51ba1
|
Metadata-Version: 2.1\nName: argon2-cffi-bindings\nVersion: 21.2.0\nSummary: Low-level CFFI bindings for Argon2\nHome-page: https://github.com/hynek/argon2-cffi-bindings\nAuthor: Hynek Schlawack\nAuthor-email: [email protected]\nMaintainer: Hynek Schlawack\nMaintainer-email: [email protected]\nLicense: MIT\nProject-URL: Source Code, https://github.com/hynek/argon2-cffi-bindings\nProject-URL: Funding, https://github.com/sponsors/hynek\nProject-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-argon2-cffi?utm_source=pypi-argon2-cffi&utm_medium=pypi\nProject-URL: Ko-fi, https://ko-fi.com/the_hynek\nKeywords: password,hash,hashing,security,bindings,cffi\nPlatform: UNKNOWN\nClassifier: Development Status :: 5 - Production/Stable\nClassifier: Intended Audience :: Developers\nClassifier: License :: OSI Approved :: MIT License\nClassifier: Natural Language :: English\nClassifier: Operating System :: MacOS :: MacOS X\nClassifier: Operating System :: Microsoft :: Windows\nClassifier: Operating System :: POSIX\nClassifier: Operating System :: Unix\nClassifier: Programming Language :: Python :: 3\nClassifier: Programming Language :: Python :: 3.6\nClassifier: Programming Language :: Python :: 3.7\nClassifier: Programming Language :: Python :: 3.8\nClassifier: Programming Language :: Python :: 3.9\nClassifier: Programming Language :: Python :: 3.10\nClassifier: Programming Language :: Python :: Implementation :: CPython\nClassifier: Programming Language :: Python :: Implementation :: PyPy\nClassifier: Programming Language :: Python\nClassifier: Topic :: Security :: Cryptography\nClassifier: Topic :: Security\nClassifier: Topic :: Software Development :: Libraries :: Python Modules\nRequires-Python: >=3.6\nDescription-Content-Type: text/markdown\nLicense-File: LICENSE\nRequires-Dist: cffi (>=1.0.1)\nProvides-Extra: dev\nRequires-Dist: pytest ; extra == 'dev'\nRequires-Dist: cogapp ; extra == 'dev'\nRequires-Dist: pre-commit ; extra == 'dev'\nRequires-Dist: wheel ; extra == 'dev'\nProvides-Extra: tests\nRequires-Dist: pytest ; extra == 'tests'\n\n# Low-level Python CFFI Bindings for Argon2\n\n*argon2-cffi-bindings* provides low-level [*CFFI*](https://cffi.readthedocs.io/) bindings to the [*Argon2*] password hashing algorithm including a vendored version of them.\n\n<!-- [[[cog\n# Extract commit ID; refresh using `tox -e cog`\nimport subprocess\nout = subprocess.check_output(["git", "submodule"], text=True)\nid = out.strip().split(" ", 1)[0]\nlink = f'[**`{id[:7]}`**](https://github.com/P-H-C/phc-winner-argon2/commit/{id})'\nprint(f"The currently vendored *Argon2* commit ID is {link}.")\n]]] -->\nThe currently vendored *Argon2* commit ID is [**`f57e61e`**](https://github.com/P-H-C/phc-winner-argon2/commit/f57e61e19229e23c4445b85494dbf7c07de721cb).\n<!-- [[[end]]] -->\n\n> If you want to hash passwords in an application, this package is **not** for you.\n> Have a look at [*argon2-cffi*] with its high-level abstractions!\n\nThese bindings have been extracted from [*argon2-cffi*] and it remains its main consumer.\nHowever, they may be used by other packages that want to use the *Argon2* library without dealing with C-related complexities.\n\n\n## Usage\n\n*argon2-cffi-bindings* is available from [PyPI](https://pypi.org/project/argon2-cffi-bindings/).\nThe provided *CFFI* bindings are compiled in API mode.\n\nBest effort is given to provide binary wheels for as many platforms as possible.\n\n\n### Disabling Vendored Code\n\nA copy of [*Argon2*] is vendored and used by default, but can be disabled if *argon2-cffi-bindings* is installed using:\n\n```console\n$ env ARGON2_CFFI_USE_SYSTEM=1 \\n python -m pip install --no-binary=argon2-cffi-bindings argon2-cffi-bindings\n```\n\n\n### Overriding Automatic *SSE2* Detection\n\nUsually the build process tries to guess whether or not it should use [*SSE2*](https://en.wikipedia.org/wiki/SSE2)-optimized code (see [`_ffi_build.py`](https://github.com/hynek/argon2-cffi-bindings/blob/main/src/_argon2_cffi_bindings/_ffi_build.py) for details).\nThis can go wrong and is problematic for cross-compiling.\n\nTherefore you can use the `ARGON2_CFFI_USE_SSE2` environment variable to control the process:\n\n- If you set it to ``1``, *argon2-cffi-bindings* will build **with** SSE2 support.\n- If you set it to ``0``, *argon2-cffi-bindings* will build **without** SSE2 support.\n- If you set it to anything else, it will be ignored and *argon2-cffi-bindings* will try to guess.\n\nHowever, if our heuristics fail you, we would welcome a bug report.\n\n\n### Python API\n\nSince this package is intended to be an implementation detail, it uses a private module name to prevent your users from using it by accident.\n\nTherefore you have to import the symbols from `_argon2_cffi_bindings`:\n\n```python\nfrom _argon2_cffi_bindings import ffi, lib\n```\n\nPlease refer to [*cffi* documentation](https://cffi.readthedocs.io/en/latest/using.html) on how to use the `ffi` and `lib` objects.\n\nThe list of symbols that are provided can be found in the [`_ffi_build.py` file](https://github.com/hynek/argon2-cffi-bindings/blob/main/src/_argon2_cffi_bindings/_ffi_build.py).\n\n[*Argon2*]: https://github.com/p-h-c/phc-winner-argon2\n[*argon2-cffi*]: https://argon2-cffi.readthedocs.io/\n\n\n## Project Information\n\n*argon2-cffi-bindings* is available under the MIT license, available from [PyPI](https://pypi.org/project/argon2-cffi-bindings/), the source code and documentation can be found on [GitHub](https://github.com/hynek/argon2-cffi-bindings).\n\n*argon2-cffi-bindings* targets Python 3.6 and later, including PyPy3.\n\n\n### Credits & License\n\n*argon2-cffi-bindings* is written and maintained by [Hynek Schlawack](https://hynek.me/about/).\nIt is released under the [MIT license](https://github.com/hynek/argon2-cffi/blob/main/LICENSE>).\n\nThe development is kindly supported by [Variomedia AG](https://www.variomedia.de/).\n\nThe authors of *Argon2* were very helpful to get the library to compile on ancient versions of Visual Studio for ancient versions of Python.\n\nThe documentation quotes frequently in verbatim from the *Argon2* [paper](https://www.password-hashing.net/argon2-specs.pdf) to avoid mistakes by rephrasing.\n\n\n#### Vendored Code\n\nThe original *Argon2* repo can be found at <https://github.com/P-H-C/phc-winner-argon2/>.\n\nExcept for the components listed below, the *Argon2* code in this repository is copyright (c) 2015 Daniel Dinu, Dmitry Khovratovich (main authors), Jean-Philippe Aumasson and Samuel Neves, and under [CC0] license.\n\nThe string encoding routines in src/encoding.c are copyright (c) 2015 Thomas Pornin, and under [CC0] license.\n\nThe [*BLAKE2*](https://www.blake2.net) code in ``src/blake2/`` is copyright (c) Samuel Neves, 2013-2015, and under [CC0] license.\n\n[CC0]: https://creativecommons.org/publicdomain/zero/1.0/\n\n\n
|
.venv\Lib\site-packages\argon2_cffi_bindings-21.2.0.dist-info\METADATA
|
METADATA
|
Other
| 6,705 | 0.95 | 0.072368 | 0.132075 |
vue-tools
| 885 |
2023-07-25T05:06:11.986296
|
Apache-2.0
| false |
ff3a28401f15db5e1d6759a406c21d6e
|
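To make the README's `ffi`/`lib` pointer concrete: a minimal sketch, under the assumption that `argon2_error_message()` from upstream Argon2's C API is among the symbols exposed by `_ffi_build.py` (the higher-level *argon2-cffi* calls it the same way); this is not a definitive reference for the binding surface:

```python
# Assumed-API sketch: exercising the raw CFFI objects from the README above.
from _argon2_cffi_bindings import ffi, lib

# argon2_error_message() maps Argon2 error codes to C strings; 0 is ARGON2_OK.
msg = ffi.string(lib.argon2_error_message(0)).decode("ascii")
print(msg)  # expected: "OK"
```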