diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 571bd1db..be5cb272 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,46 +8,70 @@ on: workflow_dispatch: jobs: - - mypy: - name: 'MyPy' - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Setup python - uses: actions/setup-python@v2 - with: - python-version: '3.11' - - - name: Install dependencies - run: pip install -U . --upgrade-strategy eager -r requirements-test.txt - - - name: Run MyPy check - run: mypy tractor/ --ignore-missing-imports --show-traceback - + # ------ sdist ------ # test that we can generate a software distribution and install it # thus avoid missing file issues after packaging. + # + # -[x] produce sdist with uv + # ------ - ------ sdist-linux: name: 'sdist' runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - - name: Setup python - uses: actions/setup-python@v2 - with: - python-version: '3.11' + - name: Install latest uv + uses: astral-sh/setup-uv@v6 - - name: Build sdist - run: python setup.py sdist --formats=zip + - name: Build sdist as tar.gz + run: uv build --sdist --python=3.13 - - name: Install sdist from .zips - run: python -m pip install dist/*.zip + - name: Install sdist from .tar.gz + run: python -m pip install dist/*.tar.gz + + # ------ type-check ------ + # mypy: + # name: 'MyPy' + # runs-on: ubuntu-latest + + # steps: + # - name: Checkout + # uses: actions/checkout@v4 + + # - name: Install latest uv + # uses: astral-sh/setup-uv@v6 + + # # faster due to server caching? + # # https://docs.astral.sh/uv/guides/integration/github/#setting-up-python + # - name: "Set up Python" + # uses: actions/setup-python@v6 + # with: + # python-version-file: "pyproject.toml" + + # # w uv + # # - name: Set up Python + # # run: uv python install + + # - name: Setup uv venv + # run: uv venv .venv --python=3.13 + + # - name: Install + # run: uv sync --dev + + # # TODO, ty cmd over repo + # # - name: type check with ty + # # run: ty ./tractor/ + + # # - uses: actions/cache@v3 + # # name: Cache uv virtenv as default .venv + # # with: + # # path: ./.venv + # # key: venv-${{ hashFiles('uv.lock') }} + + # - name: Run MyPy check + # run: mypy tractor/ --ignore-missing-imports --show-traceback testing-linux: @@ -59,32 +83,45 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python: ['3.11'] + python-version: ['3.13'] spawn_backend: [ 'trio', - 'mp_spawn', - 'mp_forkserver', + # 'mp_spawn', + # 'mp_forkserver', ] steps: - - name: Checkout - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - - name: Setup python - uses: actions/setup-python@v2 + - name: 'Install uv + py-${{ matrix.python-version }}' + uses: astral-sh/setup-uv@v6 with: - python-version: '${{ matrix.python }}' + python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager + # GH way.. faster? + # - name: setup-python@v6 + # uses: actions/setup-python@v6 + # with: + # python-version: '${{ matrix.python-version }}' - - name: List dependencies - run: pip list + # consider caching for speedups? + # https://docs.astral.sh/uv/guides/integration/github/#caching + + - name: Install the project w uv + run: uv sync --all-extras --dev + + # - name: Install dependencies + # run: pip install -U . 
-r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager + + - name: List deps tree + run: uv tree - name: Run tests - run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx + run: uv run pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx + # XXX legacy NOTE XXX + # # We skip 3.10 on windows for now due to not having any collabs to # debug the CI failures. Anyone wanting to hack and solve them is very # welcome, but our primary user base is not using that OS. diff --git a/default.nix b/default.nix new file mode 100644 index 00000000..08e46d06 --- /dev/null +++ b/default.nix @@ -0,0 +1,19 @@ +{ pkgs ? import {} }: +let + nativeBuildInputs = with pkgs; [ + stdenv.cc.cc.lib + uv + ]; + +in +pkgs.mkShell { + inherit nativeBuildInputs; + + LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath nativeBuildInputs; + TMPDIR = "/tmp"; + + shellHook = '' + set -e + uv venv .venv --python=3.12 + ''; +} diff --git a/docs/README.rst b/docs/README.rst index e3bd9f84..cea223ee 100644 --- a/docs/README.rst +++ b/docs/README.rst @@ -1,8 +1,5 @@ |logo| ``tractor``: distributed structurred concurrency -|gh_actions| -|docs| - ``tractor`` is a `structured concurrency`_ (SC), multi-processing_ runtime built on trio_. Fundamentally, ``tractor`` provides parallelism via @@ -66,6 +63,13 @@ Features - (WIP) a ``TaskMngr``: one-cancels-one style nursery supervisor. +Status of `main` / infra +------------------------ + +- |gh_actions| +- |docs| + + Install ------- ``tractor`` is still in a *alpha-near-beta-stage* for many @@ -689,9 +693,11 @@ channel`_! .. _msgspec: https://jcristharif.com/msgspec/ .. _guest: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops - -.. |gh_actions| image:: https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Fgoodboy%2Ftractor%2Fbadge&style=popout-square - :target: https://actions-badge.atrox.dev/goodboy/tractor/goto +.. + NOTE, on generating badge links from the UI + https://docs.github.com/en/actions/how-tos/monitoring-and-troubleshooting-workflows/monitoring-workflows/adding-a-workflow-status-badge?ref=gitguardian-blog-automated-secrets-detection#using-the-ui +.. |gh_actions| image:: https://github.com/goodboy/tractor/actions/workflows/ci.yml/badge.svg?branch=main + :target: https://github.com/goodboy/tractor/actions/workflows/ci.yml .. |docs| image:: https://readthedocs.org/projects/tractor/badge/?version=latest :target: https://tractor.readthedocs.io/en/latest/?badge=latest diff --git a/examples/advanced_faults/ipc_failure_during_stream.py b/examples/advanced_faults/ipc_failure_during_stream.py index 950d5a6f..f3a709e0 100644 --- a/examples/advanced_faults/ipc_failure_during_stream.py +++ b/examples/advanced_faults/ipc_failure_during_stream.py @@ -120,6 +120,7 @@ async def main( break_parent_ipc_after: int|bool = False, break_child_ipc_after: int|bool = False, pre_close: bool = False, + tpt_proto: str = 'tcp', ) -> None: @@ -131,6 +132,7 @@ async def main( # a hang since it never engages due to broken IPC debug_mode=debug_mode, loglevel=loglevel, + enable_transports=[tpt_proto], ) as an, ): @@ -145,7 +147,8 @@ async def main( _testing.expect_ctxc( yay=( break_parent_ipc_after - or break_child_ipc_after + or + break_child_ipc_after ), # TODO: we CAN'T remove this right? 
# since we need the ctxc to bubble up from either diff --git a/examples/debugging/asyncio_bp.py b/examples/debugging/asyncio_bp.py index 296dbccb..fc3b222a 100644 --- a/examples/debugging/asyncio_bp.py +++ b/examples/debugging/asyncio_bp.py @@ -29,7 +29,7 @@ async def bp_then_error( to_trio.send_nowait('start') # NOTE: what happens here inside the hook needs some refinement.. - # => seems like it's still `._debug._set_trace()` but + # => seems like it's still `.debug._set_trace()` but # we set `Lock.local_task_in_debug = 'sync'`, we probably want # some further, at least, meta-data about the task/actor in debug # in terms of making it clear it's `asyncio` mucking about. diff --git a/examples/debugging/restore_builtin_breakpoint.py b/examples/debugging/restore_builtin_breakpoint.py index b591b0f7..06c3bbc4 100644 --- a/examples/debugging/restore_builtin_breakpoint.py +++ b/examples/debugging/restore_builtin_breakpoint.py @@ -4,6 +4,11 @@ import sys import trio import tractor +# ensure mod-path is correct! +from tractor.devx.debug import ( + _sync_pause_from_builtin as _sync_pause_from_builtin, +) + async def main() -> None: @@ -13,19 +18,23 @@ async def main() -> None: async with tractor.open_nursery( debug_mode=True, - ) as an: - assert an + loglevel='devx', + maybe_enable_greenback=True, + # ^XXX REQUIRED to enable `breakpoint()` support (from sync + # fns) and thus required here to avoid an assertion err + # on the next line + ): assert ( (pybp_var := os.environ['PYTHONBREAKPOINT']) == - 'tractor.devx._debug._sync_pause_from_builtin' + 'tractor.devx.debug._sync_pause_from_builtin' ) # TODO: an assert that verifies the hook has indeed been, hooked # XD assert ( (pybp_hook := sys.breakpointhook) - is not tractor.devx._debug._set_trace + is not tractor.devx.debug._set_trace ) print( diff --git a/examples/debugging/root_cancelled_but_child_is_in_tty_lock.py b/examples/debugging/root_cancelled_but_child_is_in_tty_lock.py index 16f92b81..72c6de4c 100644 --- a/examples/debugging/root_cancelled_but_child_is_in_tty_lock.py +++ b/examples/debugging/root_cancelled_but_child_is_in_tty_lock.py @@ -24,10 +24,9 @@ async def spawn_until(depth=0): async def main(): - """The main ``tractor`` routine. - - The process tree should look as approximately as follows when the debugger - first engages: + ''' + The process tree should look as approximately as follows when the + debugger first engages: python examples/debugging/multi_nested_subactors_bp_forever.py ├─ python -m tractor._child --uid ('spawner1', '7eab8462 ...) @@ -37,10 +36,11 @@ async def main(): └─ python -m tractor._child --uid ('spawner0', '1d42012b ...) └─ python -m tractor._child --uid ('name_error', '6c2733b8 ...) 
- """ + ''' async with tractor.open_nursery( debug_mode=True, - loglevel='warning' + loglevel='devx', + enable_transports=['uds'], ) as n: # spawn both actors diff --git a/examples/debugging/shield_hang_in_sub.py b/examples/debugging/shield_hang_in_sub.py index 5387353f..bf045fe8 100644 --- a/examples/debugging/shield_hang_in_sub.py +++ b/examples/debugging/shield_hang_in_sub.py @@ -37,6 +37,7 @@ async def main( enable_stack_on_sig=True, # maybe_enable_greenback=False, loglevel='devx', + enable_transports=['uds'], ) as an, ): ptl: tractor.Portal = await an.start_actor( diff --git a/examples/debugging/sync_bp.py b/examples/debugging/sync_bp.py index 95472c93..a26a9c54 100644 --- a/examples/debugging/sync_bp.py +++ b/examples/debugging/sync_bp.py @@ -6,7 +6,7 @@ import tractor # TODO: only import these when not running from test harness? # can we detect `pexpect` usage maybe? -# from tractor.devx._debug import ( +# from tractor.devx.debug import ( # get_lock, # get_debug_req, # ) diff --git a/examples/service_discovery.py b/examples/service_discovery.py index a0f37b88..1219f0c1 100644 --- a/examples/service_discovery.py +++ b/examples/service_discovery.py @@ -9,7 +9,7 @@ async def main(service_name): async with tractor.open_nursery() as an: await an.start_actor(service_name) - async with tractor.get_registry('127.0.0.1', 1616) as portal: + async with tractor.get_registry() as portal: print(f"Arbiter is listening on {portal.channel}") async with tractor.wait_for_actor(service_name) as sockaddr: diff --git a/pyproject.toml b/pyproject.toml index b3e9e100..a0491598 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,6 +45,8 @@ dependencies = [ "pdbp>=1.6,<2", # windows only (from `pdbp`) # typed IPC msging "msgspec>=0.19.0", + "cffi>=1.17.1", + "bidict>=0.23.1", ] # ------ project ------ @@ -59,9 +61,13 @@ dev = [ # `tractor.devx` tooling "greenback>=1.2.1,<2", "stackscope>=0.2.2,<0.3", + # ^ requires this? + "typing-extensions>=4.14.1", + "pyperclip>=1.9.0", "prompt-toolkit>=3.0.50", "xonsh>=0.19.2", + "psutil>=7.0.0", ] # TODO, add these with sane versions; were originally in # `requirements-docs.txt`.. diff --git a/tests/conftest.py b/tests/conftest.py index 674767ff..b84f4105 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,24 +1,27 @@ """ -``tractor`` testing!! +Top level of the testing suites! + """ +from __future__ import annotations import sys import subprocess import os -import random import signal import platform import time import pytest -import tractor from tractor._testing import ( examples_dir as examples_dir, tractor_test as tractor_test, expect_ctxc as expect_ctxc, ) -# TODO: include wtv plugin(s) we build in `._testing.pytest`? -pytest_plugins = ['pytester'] +pytest_plugins: list[str] = [ + 'pytester', + 'tractor._testing.pytest', +] + # Sending signal.SIGINT on subprocess fails on windows. Use CTRL_* alternatives if platform.system() == 'Windows': @@ -30,7 +33,11 @@ else: _KILL_SIGNAL = signal.SIGKILL _INT_SIGNAL = signal.SIGINT _INT_RETURN_CODE = 1 if sys.version_info < (3, 8) else -signal.SIGINT.value - _PROC_SPAWN_WAIT = 0.6 if sys.version_info < (3, 7) else 0.4 + _PROC_SPAWN_WAIT = ( + 0.6 + if sys.version_info < (3, 7) + else 0.4 + ) no_windows = pytest.mark.skipif( @@ -39,7 +46,12 @@ no_windows = pytest.mark.skipif( ) -def pytest_addoption(parser): +def pytest_addoption( + parser: pytest.Parser, +): + # ?TODO? 
should this be exposed from our `._testing.pytest` + # plugin or should we make it more explicit with `--tl` for + # tractor logging like we do in other client projects? parser.addoption( "--ll", action="store", @@ -47,42 +59,10 @@ def pytest_addoption(parser): default='ERROR', help="logging level to set when testing" ) - parser.addoption( - "--spawn-backend", - action="store", - dest='spawn_backend', - default='trio', - help="Processing spawning backend to use for test run", - ) - - parser.addoption( - "--tpdb", "--debug-mode", - action="store_true", - dest='tractor_debug_mode', - # default=False, - help=( - 'Enable a flag that can be used by tests to to set the ' - '`debug_mode: bool` for engaging the internal ' - 'multi-proc debugger sys.' - ), - ) - - -def pytest_configure(config): - backend = config.option.spawn_backend - tractor._spawn.try_set_start_method(backend) - - -@pytest.fixture(scope='session') -def debug_mode(request): - debug_mode: bool = request.config.option.tractor_debug_mode - # if debug_mode: - # breakpoint() - return debug_mode - @pytest.fixture(scope='session', autouse=True) def loglevel(request): + import tractor orig = tractor.log._default_loglevel level = tractor.log._default_loglevel = request.config.option.loglevel tractor.log.get_console_log(level) @@ -90,106 +70,44 @@ def loglevel(request): tractor.log._default_loglevel = orig -@pytest.fixture(scope='session') -def spawn_backend(request) -> str: - return request.config.option.spawn_backend - - -# @pytest.fixture(scope='function', autouse=True) -# def debug_enabled(request) -> str: -# from tractor import _state -# if _state._runtime_vars['_debug_mode']: -# breakpoint() - _ci_env: bool = os.environ.get('CI', False) @pytest.fixture(scope='session') def ci_env() -> bool: ''' - Detect CI envoirment. + Detect CI environment. ''' return _ci_env -# TODO: also move this to `._testing` for now? -# -[ ] possibly generalize and re-use for multi-tree spawning -# along with the new stuff for multi-addrs in distribute_dis -# branch? -# -# choose randomly at import time -_reg_addr: tuple[str, int] = ( - '127.0.0.1', - random.randint(1000, 9999), -) - - -@pytest.fixture(scope='session') -def reg_addr() -> tuple[str, int]: - - # globally override the runtime to the per-test-session-dynamic - # addr so that all tests never conflict with any other actor - # tree using the default. - from tractor import _root - _root._default_lo_addrs = [_reg_addr] - - return _reg_addr - - -def pytest_generate_tests(metafunc): - spawn_backend = metafunc.config.option.spawn_backend - - if not spawn_backend: - # XXX some weird windows bug with `pytest`? - spawn_backend = 'trio' - - # TODO: maybe just use the literal `._spawn.SpawnMethodKey`? - assert spawn_backend in ( - 'mp_spawn', - 'mp_forkserver', - 'trio', - ) - - # NOTE: used to be used to dyanmically parametrize tests for when - # you just passed --spawn-backend=`mp` on the cli, but now we expect - # that cli input to be manually specified, BUT, maybe we'll do - # something like this again in the future? - if 'start_method' in metafunc.fixturenames: - metafunc.parametrize("start_method", [spawn_backend], scope='module') - - -# TODO: a way to let test scripts (like from `examples/`) -# guarantee they won't registry addr collide! 
-# @pytest.fixture -# def open_test_runtime( -# reg_addr: tuple, -# ) -> AsyncContextManager: -# return partial( -# tractor.open_nursery, -# registry_addrs=[reg_addr], -# ) - - -def sig_prog(proc, sig): +def sig_prog( + proc: subprocess.Popen, + sig: int, + canc_timeout: float = 0.1, +) -> int: "Kill the actor-process with ``sig``." proc.send_signal(sig) - time.sleep(0.1) + time.sleep(canc_timeout) if not proc.poll(): # TODO: why sometimes does SIGINT not work on teardown? # seems to happen only when trace logging enabled? proc.send_signal(_KILL_SIGNAL) - ret = proc.wait() + ret: int = proc.wait() assert ret # TODO: factor into @cm and move to `._testing`? @pytest.fixture def daemon( + debug_mode: bool, loglevel: str, - testdir, + testdir: pytest.Pytester, reg_addr: tuple[str, int], -): + tpt_proto: str, + +) -> subprocess.Popen: ''' Run a daemon root actor as a separate actor-process tree and "remote registrar" for discovery-protocol related tests. @@ -200,28 +118,100 @@ def daemon( loglevel: str = 'info' code: str = ( - "import tractor; " - "tractor.run_daemon([], registry_addrs={reg_addrs}, loglevel={ll})" + "import tractor; " + "tractor.run_daemon([], " + "registry_addrs={reg_addrs}, " + "debug_mode={debug_mode}, " + "loglevel={ll})" ).format( reg_addrs=str([reg_addr]), ll="'{}'".format(loglevel) if loglevel else None, + debug_mode=debug_mode, ) cmd: list[str] = [ sys.executable, '-c', code, ] + # breakpoint() kwargs = {} if platform.system() == 'Windows': # without this, tests hang on windows forever kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP - proc = testdir.popen( + proc: subprocess.Popen = testdir.popen( cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, **kwargs, ) - assert not proc.returncode + + # UDS sockets are **really** fast to bind()/listen()/connect() + # so it's often required that we delay a bit more starting + # the first actor-tree.. + if tpt_proto == 'uds': + global _PROC_SPAWN_WAIT + _PROC_SPAWN_WAIT = 0.6 + time.sleep(_PROC_SPAWN_WAIT) + + assert not proc.returncode yield proc sig_prog(proc, _INT_SIGNAL) + + # XXX! yeah.. just be reaaal careful with this bc sometimes it + # can lock up on the `_io.BufferedReader` and hang.. + stderr: str = proc.stderr.read().decode() + if stderr: + print( + f'Daemon actor tree produced STDERR:\n' + f'{proc.args}\n' + f'\n' + f'{stderr}\n' + ) + if proc.returncode != -2: + raise RuntimeError( + 'Daemon actor tree failed !?\n' + f'{proc.args}\n' + ) + + +# @pytest.fixture(autouse=True) +# def shared_last_failed(pytestconfig): +# val = pytestconfig.cache.get("example/value", None) +# breakpoint() +# if val is None: +# pytestconfig.cache.set("example/value", val) +# return val + + +# TODO: a way to let test scripts (like from `examples/`) +# guarantee they won't `registry_addrs` collide! +# -[ ] maybe use some kinda standard `def main()` arg-spec that +# we can introspect from a fixture that is called from the test +# body? +# -[ ] test and figure out typing for below prototype! Bp +# +# @pytest.fixture +# def set_script_runtime_args( +# reg_addr: tuple, +# ) -> Callable[[...], None]: + +# def import_n_partial_in_args_n_triorun( +# script: Path, # under examples? 
+# **runtime_args, +# ) -> Callable[[], Any]: # a `partial`-ed equiv of `trio.run()` + +# # NOTE, below is taken from +# # `.test_advanced_faults.test_ipc_channel_break_during_stream` +# mod: ModuleType = import_path( +# examples_dir() / 'advanced_faults' +# / 'ipc_failure_during_stream.py', +# root=examples_dir(), +# consider_namespace_packages=False, +# ) +# return partial( +# trio.run, +# partial( +# mod.main, +# **runtime_args, +# ) +# ) +# return import_n_partial_in_args_n_triorun diff --git a/tests/devx/conftest.py b/tests/devx/conftest.py index c45265dc..9a5c90a5 100644 --- a/tests/devx/conftest.py +++ b/tests/devx/conftest.py @@ -2,9 +2,11 @@ `tractor.devx.*` tooling sub-pkg test space. ''' +from __future__ import annotations import time from typing import ( Callable, + TYPE_CHECKING, ) import pytest @@ -16,7 +18,7 @@ from pexpect.spawnbase import SpawnBase from tractor._testing import ( mk_cmd, ) -from tractor.devx._debug import ( +from tractor.devx.debug import ( _pause_msg as _pause_msg, _crash_msg as _crash_msg, _repl_fail_msg as _repl_fail_msg, @@ -26,14 +28,22 @@ from ..conftest import ( _ci_env, ) +if TYPE_CHECKING: + from pexpect import pty_spawn + + +# a fn that sub-instantiates a `pexpect.spawn()` +# and returns it. +type PexpectSpawner = Callable[[str], pty_spawn.spawn] + @pytest.fixture def spawn( - start_method, + start_method: str, testdir: pytest.Pytester, reg_addr: tuple[str, int], -) -> Callable[[str], None]: +) -> PexpectSpawner: ''' Use the `pexpect` module shipped via `testdir.spawn()` to run an `./examples/..` script by name. @@ -59,7 +69,7 @@ def spawn( def _spawn( cmd: str, **mkcmd_kwargs, - ): + ) -> pty_spawn.spawn: unset_colors() return testdir.spawn( cmd=mk_cmd( @@ -73,7 +83,7 @@ def spawn( ) # such that test-dep can pass input script name. - return _spawn + return _spawn # the `PexpectSpawner`, type alias. @pytest.fixture( @@ -111,7 +121,7 @@ def ctlc( # XXX: disable pygments highlighting for auto-tests # since some envs (like actions CI) will struggle # the the added color-char encoding.. - from tractor.devx._debug import TractorConfig + from tractor.devx.debug import TractorConfig TractorConfig.use_pygements = False yield use_ctlc diff --git a/tests/devx/test_debugger.py b/tests/devx/test_debugger.py index 171e983e..5a40caea 100644 --- a/tests/devx/test_debugger.py +++ b/tests/devx/test_debugger.py @@ -528,7 +528,7 @@ def test_multi_daemon_subactors( # now the root actor won't clobber the bp_forever child # during it's first access to the debug lock, but will instead # wait for the lock to release, by the edge triggered - # ``devx._debug.Lock.no_remote_has_tty`` event before sending cancel messages + # ``devx.debug.Lock.no_remote_has_tty`` event before sending cancel messages # (via portals) to its underlings B) # at some point here there should have been some warning msg from diff --git a/tests/devx/test_tooling.py b/tests/devx/test_tooling.py index 2debe3f7..c1bb8692 100644 --- a/tests/devx/test_tooling.py +++ b/tests/devx/test_tooling.py @@ -13,9 +13,13 @@ TODO: when debugging a problem inside the stack vs. in their app. 
''' +from __future__ import annotations import os import signal import time +from typing import ( + TYPE_CHECKING, +) from .conftest import ( expect, @@ -29,9 +33,12 @@ from pexpect.exceptions import ( EOF, ) +if TYPE_CHECKING: + from ..conftest import PexpectSpawner + def test_shield_pause( - spawn, + spawn: PexpectSpawner, ): ''' Verify the `tractor.pause()/.post_mortem()` API works inside an @@ -126,7 +133,7 @@ def test_shield_pause( def test_breakpoint_hook_restored( - spawn, + spawn: PexpectSpawner, ): ''' Ensures our actor runtime sets a custom `breakpoint()` hook @@ -140,16 +147,22 @@ def test_breakpoint_hook_restored( child = spawn('restore_builtin_breakpoint') child.expect(PROMPT) - assert_before( - child, - [ - _pause_msg, - " None: + if maybe_daemon: + popen, proc = maybe_daemon + # breakpoint() + async with tractor.open_root_actor( registry_addrs=[reg_addr], + debug_mode=debug_mode, ): - async with tractor.get_registry(*reg_addr) as portal: + async with tractor.get_registry(reg_addr) as portal: # runtime needs to be up to call this actor = tractor.current_actor() @@ -176,11 +188,11 @@ async def spawn_and_check_registry( extra = 2 # local root actor + remote arbiter # ensure current actor is registered - registry = await get_reg() + registry: dict = await get_reg() assert actor.uid in registry try: - async with tractor.open_nursery() as n: + async with tractor.open_nursery() as an: async with trio.open_nursery( strict_exception_groups=False, ) as trion: @@ -189,17 +201,17 @@ async def spawn_and_check_registry( for i in range(3): name = f'a{i}' if with_streaming: - portals[name] = await n.start_actor( + portals[name] = await an.start_actor( name=name, enable_modules=[__name__]) else: # no streaming - portals[name] = await n.run_in_actor( + portals[name] = await an.run_in_actor( trio.sleep_forever, name=name) # wait on last actor to come up async with tractor.wait_for_actor(name): registry = await get_reg() - for uid in n._children: + for uid in an._children: assert uid in registry assert len(portals) + extra == len(registry) @@ -232,6 +244,7 @@ async def spawn_and_check_registry( @pytest.mark.parametrize('use_signal', [False, True]) @pytest.mark.parametrize('with_streaming', [False, True]) def test_subactors_unregister_on_cancel( + debug_mode: bool, start_method, use_signal, reg_addr, @@ -248,6 +261,7 @@ def test_subactors_unregister_on_cancel( spawn_and_check_registry, reg_addr, use_signal, + debug_mode=debug_mode, remote_arbiter=False, with_streaming=with_streaming, ), @@ -257,7 +271,8 @@ def test_subactors_unregister_on_cancel( @pytest.mark.parametrize('use_signal', [False, True]) @pytest.mark.parametrize('with_streaming', [False, True]) def test_subactors_unregister_on_cancel_remote_daemon( - daemon, + daemon: subprocess.Popen, + debug_mode: bool, start_method, use_signal, reg_addr, @@ -273,8 +288,13 @@ def test_subactors_unregister_on_cancel_remote_daemon( spawn_and_check_registry, reg_addr, use_signal, + debug_mode=debug_mode, remote_arbiter=True, with_streaming=with_streaming, + maybe_daemon=( + daemon, + psutil.Process(daemon.pid) + ), ), ) @@ -300,7 +320,7 @@ async def close_chans_before_nursery( async with tractor.open_root_actor( registry_addrs=[reg_addr], ): - async with tractor.get_registry(*reg_addr) as aportal: + async with tractor.get_registry(reg_addr) as aportal: try: get_reg = partial(unpack_reg, aportal) @@ -373,7 +393,7 @@ def test_close_channel_explicit( @pytest.mark.parametrize('use_signal', [False, True]) def test_close_channel_explicit_remote_arbiter( - 
daemon, + daemon: subprocess.Popen, start_method, use_signal, reg_addr, diff --git a/tests/test_docs_examples.py b/tests/test_docs_examples.py index cc4904f8..6250e0aa 100644 --- a/tests/test_docs_examples.py +++ b/tests/test_docs_examples.py @@ -66,6 +66,9 @@ def run_example_in_subproc( # due to backpressure!!! proc = testdir.popen( cmdargs, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, **kwargs, ) assert not proc.returncode @@ -119,10 +122,14 @@ def test_example( code = ex.read() with run_example_in_subproc(code) as proc: - proc.wait() - err, _ = proc.stderr.read(), proc.stdout.read() - # print(f'STDERR: {err}') - # print(f'STDOUT: {out}') + err = None + try: + if not proc.poll(): + _, err = proc.communicate(timeout=15) + + except subprocess.TimeoutExpired as e: + proc.kill() + err = e.stderr # if we get some gnarly output let's aggregate and raise if err: diff --git a/tests/test_infected_asyncio.py b/tests/test_infected_asyncio.py index 465decca..195ed4cd 100644 --- a/tests/test_infected_asyncio.py +++ b/tests/test_infected_asyncio.py @@ -889,7 +889,7 @@ async def manage_file( # NOTE: turns out you don't even need to sched an aio task # since the original issue, even though seemingly was due to - # the guest-run being abandoned + a `._debug.pause()` inside + # the guest-run being abandoned + a `.debug.pause()` inside # `._runtime._async_main()` (which was originally trying to # debug the `.lifetime_stack` not closing), IS NOT actually # the core issue? @@ -1101,7 +1101,7 @@ def test_sigint_closes_lifetime_stack( # => completed using `.bestow_portal(task)` inside # `.to_asyncio._run_asyncio_task()` right? # -[ ] translation func to get from `asyncio` task calling to -# `._debug.wait_for_parent_stdin_hijack()` which does root +# `.debug.wait_for_parent_stdin_hijack()` which does root # call to do TTY locking. # def test_sync_breakpoint(): diff --git a/tests/test_inter_peer_cancellation.py b/tests/test_inter_peer_cancellation.py index bac9a791..25935df2 100644 --- a/tests/test_inter_peer_cancellation.py +++ b/tests/test_inter_peer_cancellation.py @@ -871,7 +871,7 @@ async def serve_subactors( ) await ipc.send(( peer.chan.uid, - peer.chan.raddr, + peer.chan.raddr.unwrap(), )) print('Spawner exiting spawn serve loop!') diff --git a/tests/test_local.py b/tests/test_local.py index ecdad5fe..c6f5047a 100644 --- a/tests/test_local.py +++ b/tests/test_local.py @@ -38,7 +38,7 @@ async def test_self_is_registered_localportal(reg_addr): "Verify waiting on the arbiter to register itself using a local portal." 
actor = tractor.current_actor() assert actor.is_arbiter - async with tractor.get_registry(*reg_addr) as portal: + async with tractor.get_registry(reg_addr) as portal: assert isinstance(portal, tractor._portal.LocalPortal) with trio.fail_after(0.2): diff --git a/tests/test_multi_program.py b/tests/test_multi_program.py index 860eeebb..b0b145ee 100644 --- a/tests/test_multi_program.py +++ b/tests/test_multi_program.py @@ -32,7 +32,7 @@ def test_abort_on_sigint(daemon): @tractor_test async def test_cancel_remote_arbiter(daemon, reg_addr): assert not tractor.current_actor().is_arbiter - async with tractor.get_registry(*reg_addr) as portal: + async with tractor.get_registry(reg_addr) as portal: await portal.cancel_actor() time.sleep(0.1) @@ -41,7 +41,7 @@ async def test_cancel_remote_arbiter(daemon, reg_addr): # no arbiter socket should exist with pytest.raises(OSError): - async with tractor.get_registry(*reg_addr) as portal: + async with tractor.get_registry(reg_addr) as portal: pass diff --git a/tests/test_resource_cache.py b/tests/test_resource_cache.py index d3859814..bdcdd6c9 100644 --- a/tests/test_resource_cache.py +++ b/tests/test_resource_cache.py @@ -100,16 +100,29 @@ async def streamer( @acm async def open_stream() -> Awaitable[tractor.MsgStream]: - async with tractor.open_nursery() as tn: - portal = await tn.start_actor('streamer', enable_modules=[__name__]) - async with ( - portal.open_context(streamer) as (ctx, first), - ctx.open_stream() as stream, - ): - yield stream + try: + async with tractor.open_nursery() as an: + portal = await an.start_actor( + 'streamer', + enable_modules=[__name__], + ) + async with ( + portal.open_context(streamer) as (ctx, first), + ctx.open_stream() as stream, + ): + yield stream - await portal.cancel_actor() - print('CANCELLED STREAMER') + print('Cancelling streamer') + await portal.cancel_actor() + print('Cancelled streamer') + + except Exception as err: + print( + f'`open_stream()` errored?\n' + f'{err!r}\n' + ) + await tractor.pause(shield=True) + raise err @acm @@ -132,19 +145,28 @@ async def maybe_open_stream(taskname: str): yield stream -def test_open_local_sub_to_stream(): +def test_open_local_sub_to_stream( + debug_mode: bool, +): ''' Verify a single inter-actor stream can can be fanned-out shared to - N local tasks using ``trionics.maybe_open_context():``. + N local tasks using `trionics.maybe_open_context()`. ''' - timeout: float = 3.6 if platform.system() != "Windows" else 10 + timeout: float = 3.6 + if platform.system() == "Windows": + timeout: float = 10 + + if debug_mode: + timeout = 999 async def main(): full = list(range(1000)) async def get_sub_and_pull(taskname: str): + + stream: tractor.MsgStream async with ( maybe_open_stream(taskname) as stream, ): @@ -165,17 +187,27 @@ def test_open_local_sub_to_stream(): assert set(seq).issubset(set(full)) print(f'{taskname} finished') - with trio.fail_after(timeout): + with trio.fail_after(timeout) as cs: # TODO: turns out this isn't multi-task entrant XD # We probably need an indepotent entry semantic? - async with tractor.open_root_actor(): + async with tractor.open_root_actor( + debug_mode=debug_mode, + ): async with ( - trio.open_nursery() as nurse, + trio.open_nursery() as tn, ): for i in range(10): - nurse.start_soon(get_sub_and_pull, f'task_{i}') + tn.start_soon( + get_sub_and_pull, + f'task_{i}', + ) await trio.sleep(0.001) print('all consumer tasks finished') + if cs.cancelled_caught: + pytest.fail( + 'Should NOT time out in `open_root_actor()` ?' 
+ ) + trio.run(main) diff --git a/tests/test_ringbuf.py b/tests/test_ringbuf.py new file mode 100644 index 00000000..0d3b420b --- /dev/null +++ b/tests/test_ringbuf.py @@ -0,0 +1,211 @@ +import time + +import trio +import pytest + +import tractor +from tractor.ipc._ringbuf import ( + open_ringbuf, + RBToken, + RingBuffSender, + RingBuffReceiver +) +from tractor._testing.samples import ( + generate_sample_messages, +) + +# in case you don't want to melt your cores, uncomment dis! +pytestmark = pytest.mark.skip + + +@tractor.context +async def child_read_shm( + ctx: tractor.Context, + msg_amount: int, + token: RBToken, + total_bytes: int, +) -> None: + recvd_bytes = 0 + await ctx.started() + start_ts = time.time() + async with RingBuffReceiver(token) as receiver: + while recvd_bytes < total_bytes: + msg = await receiver.receive_some() + recvd_bytes += len(msg) + + # make sure we dont hold any memoryviews + # before the ctx manager aclose() + msg = None + + end_ts = time.time() + elapsed = end_ts - start_ts + elapsed_ms = int(elapsed * 1000) + + print(f'\n\telapsed ms: {elapsed_ms}') + print(f'\tmsg/sec: {int(msg_amount / elapsed):,}') + print(f'\tbytes/sec: {int(recvd_bytes / elapsed):,}') + + +@tractor.context +async def child_write_shm( + ctx: tractor.Context, + msg_amount: int, + rand_min: int, + rand_max: int, + token: RBToken, +) -> None: + msgs, total_bytes = generate_sample_messages( + msg_amount, + rand_min=rand_min, + rand_max=rand_max, + ) + await ctx.started(total_bytes) + async with RingBuffSender(token) as sender: + for msg in msgs: + await sender.send_all(msg) + + +@pytest.mark.parametrize( + 'msg_amount,rand_min,rand_max,buf_size', + [ + # simple case, fixed payloads, large buffer + (100_000, 0, 0, 10 * 1024), + + # guaranteed wrap around on every write + (100, 10 * 1024, 20 * 1024, 10 * 1024), + + # large payload size, but large buffer + (10_000, 256 * 1024, 512 * 1024, 10 * 1024 * 1024) + ], + ids=[ + 'fixed_payloads_large_buffer', + 'wrap_around_every_write', + 'large_payloads_large_buffer', + ] +) +def test_ringbuf( + msg_amount: int, + rand_min: int, + rand_max: int, + buf_size: int +): + async def main(): + with open_ringbuf( + 'test_ringbuf', + buf_size=buf_size + ) as token: + proc_kwargs = { + 'pass_fds': (token.write_eventfd, token.wrap_eventfd) + } + + common_kwargs = { + 'msg_amount': msg_amount, + 'token': token, + } + async with tractor.open_nursery() as an: + send_p = await an.start_actor( + 'ring_sender', + enable_modules=[__name__], + proc_kwargs=proc_kwargs + ) + recv_p = await an.start_actor( + 'ring_receiver', + enable_modules=[__name__], + proc_kwargs=proc_kwargs + ) + async with ( + send_p.open_context( + child_write_shm, + rand_min=rand_min, + rand_max=rand_max, + **common_kwargs + ) as (sctx, total_bytes), + recv_p.open_context( + child_read_shm, + **common_kwargs, + total_bytes=total_bytes, + ) as (sctx, _sent), + ): + await recv_p.result() + + await send_p.cancel_actor() + await recv_p.cancel_actor() + + + trio.run(main) + + +@tractor.context +async def child_blocked_receiver( + ctx: tractor.Context, + token: RBToken +): + async with RingBuffReceiver(token) as receiver: + await ctx.started() + await receiver.receive_some() + + +def test_ring_reader_cancel(): + async def main(): + with open_ringbuf('test_ring_cancel_reader') as token: + async with ( + tractor.open_nursery() as an, + RingBuffSender(token) as _sender, + ): + recv_p = await an.start_actor( + 'ring_blocked_receiver', + enable_modules=[__name__], + proc_kwargs={ + 'pass_fds': 
(token.write_eventfd, token.wrap_eventfd) + } + ) + async with ( + recv_p.open_context( + child_blocked_receiver, + token=token + ) as (sctx, _sent), + ): + await trio.sleep(1) + await an.cancel() + + + with pytest.raises(tractor._exceptions.ContextCancelled): + trio.run(main) + + +@tractor.context +async def child_blocked_sender( + ctx: tractor.Context, + token: RBToken +): + async with RingBuffSender(token) as sender: + await ctx.started() + await sender.send_all(b'this will wrap') + + +def test_ring_sender_cancel(): + async def main(): + with open_ringbuf( + 'test_ring_cancel_sender', + buf_size=1 + ) as token: + async with tractor.open_nursery() as an: + recv_p = await an.start_actor( + 'ring_blocked_sender', + enable_modules=[__name__], + proc_kwargs={ + 'pass_fds': (token.write_eventfd, token.wrap_eventfd) + } + ) + async with ( + recv_p.open_context( + child_blocked_sender, + token=token + ) as (sctx, _sent), + ): + await trio.sleep(1) + await an.cancel() + + + with pytest.raises(tractor._exceptions.ContextCancelled): + trio.run(main) diff --git a/tests/test_root_runtime.py b/tests/test_root_runtime.py new file mode 100644 index 00000000..6fc39b7d --- /dev/null +++ b/tests/test_root_runtime.py @@ -0,0 +1,108 @@ +''' +Runtime boot/init sanity. + +''' + +import pytest +import trio + +import tractor +from tractor._exceptions import RuntimeFailure + + +@tractor.context +async def open_new_root_in_sub( + ctx: tractor.Context, +) -> None: + + async with tractor.open_root_actor(): + pass + + +@pytest.mark.parametrize( + 'open_root_in', + ['root', 'sub'], + ids='open_2nd_root_in={}'.format, +) +def test_only_one_root_actor( + open_root_in: str, + reg_addr: tuple, + debug_mode: bool +): + ''' + Verify we specially fail whenever more then one root actor + is attempted to be opened within an already opened tree. + + ''' + async def main(): + async with tractor.open_nursery() as an: + + if open_root_in == 'root': + async with tractor.open_root_actor( + registry_addrs=[reg_addr], + ): + pass + + ptl: tractor.Portal = await an.start_actor( + name='bad_rooty_boi', + enable_modules=[__name__], + ) + + async with ptl.open_context( + open_new_root_in_sub, + ) as (ctx, first): + pass + + if open_root_in == 'root': + with pytest.raises( + RuntimeFailure + ) as excinfo: + trio.run(main) + + else: + with pytest.raises( + tractor.RemoteActorError, + ) as excinfo: + trio.run(main) + + assert excinfo.value.boxed_type is RuntimeFailure + + +def test_implicit_root_via_first_nursery( + reg_addr: tuple, + debug_mode: bool +): + ''' + The first `ActorNursery` open should implicitly call + `_root.open_root_actor()`. + + ''' + async def main(): + async with tractor.open_nursery() as an: + assert an._implicit_runtime_started + assert tractor.current_actor().aid.name == 'root' + + trio.run(main) + + +def test_runtime_vars_unset( + reg_addr: tuple, + debug_mode: bool +): + ''' + Ensure any `._state._runtime_vars` are restored to default values + after the root actor-runtime exits! + + ''' + assert not tractor._state._runtime_vars['_debug_mode'] + async def main(): + assert not tractor._state._runtime_vars['_debug_mode'] + async with tractor.open_nursery( + debug_mode=True, + ): + assert tractor._state._runtime_vars['_debug_mode'] + + # after runtime closure, should be reverted! 
+ assert not tractor._state._runtime_vars['_debug_mode'] + + trio.run(main) diff --git a/tests/test_shm.py b/tests/test_shm.py index 2b7a382f..ddeb67aa 100644 --- a/tests/test_shm.py +++ b/tests/test_shm.py @@ -8,7 +8,7 @@ import uuid import pytest import trio import tractor -from tractor._shm import ( +from tractor.ipc._shm import ( open_shm_list, attach_shm_list, ) diff --git a/tests/test_spawning.py b/tests/test_spawning.py index 99ec9abc..30e084d5 100644 --- a/tests/test_spawning.py +++ b/tests/test_spawning.py @@ -2,6 +2,7 @@ Spawning basics """ +from functools import partial from typing import ( Any, ) @@ -12,74 +13,99 @@ import tractor from tractor._testing import tractor_test -data_to_pass_down = {'doggy': 10, 'kitty': 4} +data_to_pass_down = { + 'doggy': 10, + 'kitty': 4, +} async def spawn( - is_arbiter: bool, + should_be_root: bool, data: dict, reg_addr: tuple[str, int], + + debug_mode: bool = False, ): - namespaces = [__name__] - await trio.sleep(0.1) + actor = tractor.current_actor(err_on_no_runtime=False) - async with tractor.open_root_actor( - arbiter_addr=reg_addr, - ): - actor = tractor.current_actor() - assert actor.is_arbiter == is_arbiter - data = data_to_pass_down + if should_be_root: + assert actor is None # no runtime yet + async with ( + tractor.open_root_actor( + arbiter_addr=reg_addr, + ), + tractor.open_nursery() as an, + ): + # now runtime exists + actor: tractor.Actor = tractor.current_actor() + assert actor.is_arbiter == should_be_root - if actor.is_arbiter: - async with tractor.open_nursery() as nursery: + # spawns subproc here + portal: tractor.Portal = await an.run_in_actor( + fn=spawn, - # forks here - portal = await nursery.run_in_actor( - spawn, - is_arbiter=False, - name='sub-actor', - data=data, - reg_addr=reg_addr, - enable_modules=namespaces, - ) + # spawning args + name='sub-actor', + enable_modules=[__name__], - assert len(nursery._children) == 1 - assert portal.channel.uid in tractor.current_actor()._peers - # be sure we can still get the result - result = await portal.result() - assert result == 10 - return result - else: - return 10 + # passed to a subactor-recursive RPC invoke + # of this same `spawn()` fn. + should_be_root=False, + data=data_to_pass_down, + reg_addr=reg_addr, + ) + + assert len(an._children) == 1 + assert ( + portal.channel.uid + in + tractor.current_actor().ipc_server._peers + ) + + # get result from child subactor + result = await portal.result() + assert result == 10 + return result + else: + assert actor.is_arbiter == should_be_root + return 10 -def test_local_arbiter_subactor_global_state( - reg_addr, +def test_run_in_actor_same_func_in_child( + reg_addr: tuple, + debug_mode: bool, ): result = trio.run( - spawn, - True, - data_to_pass_down, - reg_addr, + partial( + spawn, + should_be_root=True, + data=data_to_pass_down, + reg_addr=reg_addr, + debug_mode=debug_mode, + ) ) assert result == 10 async def movie_theatre_question(): - """A question asked in a dark theatre, in a tangent + ''' + A question asked in a dark theatre, in a tangent (errr, I mean different) process. - """ + + ''' return 'have you ever seen a portal?' @tractor_test async def test_movie_theatre_convo(start_method): - """The main ``tractor`` routine. - """ - async with tractor.open_nursery() as n: + ''' + The main ``tractor`` routine. 
- portal = await n.start_actor( + ''' + async with tractor.open_nursery(debug_mode=True) as an: + + portal = await an.start_actor( 'frank', # enable the actor to run funcs from this current module enable_modules=[__name__], @@ -118,8 +144,8 @@ async def test_most_beautiful_word( with trio.fail_after(1): async with tractor.open_nursery( debug_mode=debug_mode, - ) as n: - portal = await n.run_in_actor( + ) as an: + portal = await an.run_in_actor( cellar_door, return_value=return_value, name='some_linguist', diff --git a/tests/test_trioisms.py b/tests/test_trioisms.py index 9f1ccec9..3343d788 100644 --- a/tests/test_trioisms.py +++ b/tests/test_trioisms.py @@ -180,7 +180,8 @@ def test_acm_embedded_nursery_propagates_enter_err( with tractor.devx.maybe_open_crash_handler( pdb=debug_mode, ) as bxerr: - assert not bxerr.value + if bxerr: + assert not bxerr.value async with ( wraps_tn_that_always_cancels() as tn, diff --git a/tractor/__init__.py b/tractor/__init__.py index 0c011a22..6fac747f 100644 --- a/tractor/__init__.py +++ b/tractor/__init__.py @@ -64,7 +64,7 @@ from ._root import ( run_daemon as run_daemon, open_root_actor as open_root_actor, ) -from ._ipc import Channel as Channel +from .ipc import Channel as Channel from ._portal import Portal as Portal from ._runtime import Actor as Actor # from . import hilevel as hilevel diff --git a/tractor/_addr.py b/tractor/_addr.py new file mode 100644 index 00000000..d8d11227 --- /dev/null +++ b/tractor/_addr.py @@ -0,0 +1,282 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +from __future__ import annotations +from uuid import uuid4 +from typing import ( + Protocol, + ClassVar, + Type, + TYPE_CHECKING, +) + +from bidict import bidict +from trio import ( + SocketListener, +) + +from .log import get_logger +from ._state import ( + _def_tpt_proto, +) +from .ipc._tcp import TCPAddress +from .ipc._uds import UDSAddress + +if TYPE_CHECKING: + from ._runtime import Actor + +log = get_logger(__name__) + + +# TODO, maybe breakout the netns key to a struct? +# class NetNs(Struct)[str, int]: +# ... + +# TODO, can't we just use a type alias +# for this? namely just some `tuple[str, int, str, str]`? +# +# -[ ] would also just be simpler to keep this as SockAddr[tuple] +# or something, implying it's just a simple pair of values which can +# presumably be mapped to all transports? +# -[ ] `pydoc socket.socket.getsockname()` delivers a 4-tuple for +# ipv6 `(hostaddr, port, flowinfo, scope_id)`.. so how should we +# handle that? +# -[ ] as a further alternative to this wrap()/unwrap() approach we +# could just implement `enc/dec_hook()`s for the `Address`-types +# and just deal with our internal objs directly and always and +# leave it to the codec layer to figure out marshalling? +# |_ would mean only one spot to do the `.unwrap()` (which we may +# end up needing to call from the hook()s anyway?) 
+# -[x] rename to `UnwrappedAddress[Descriptor]` ?? +# seems like the right name as per, +# https://www.geeksforgeeks.org/introduction-to-address-descriptor/ +# +UnwrappedAddress = ( + # tcp/udp/uds + tuple[ + str, # host/domain(tcp), filesys-dir(uds) + int|str, # port/path(uds) + ] + # ?TODO? should we also include another 2 fields from + # our `Aid` msg such that we include the runtime `Actor.uid` + # of `.name` and `.uuid`? + # - would ensure uniqueness across entire net? + # - allows for easier runtime-level filtering of "actors by + # service name" +) + + +# TODO, maybe rename to `SocketAddress`? +class Address(Protocol): + proto_key: ClassVar[str] + unwrapped_type: ClassVar[UnwrappedAddress] + + # TODO, i feel like an `.is_bound()` is a better thing to + # support? + # Lke, what use does this have besides a noop and if it's not + # valid why aren't we erroring on creation/use? + @property + def is_valid(self) -> bool: + ... + + # TODO, maybe `.netns` is a better name? + @property + def namespace(self) -> tuple[str, int]|None: + ''' + The if-available, OS-specific "network namespace" key. + + ''' + ... + + @property + def bindspace(self) -> str: + ''' + Deliver the socket address' "bindable space" from + a `socket.socket.bind()` and thus from the perspective of + specific transport protocol domain. + + I.e. for most (layer-4) network-socket protocols this is + normally the ipv4/6 address, for UDS this is normally + a filesystem (sub-directory). + + For (distributed) network protocols this is normally the routing + layer's domain/(ip-)address, though it might also include a "network namespace" + key different then the default. + + For local-host-only transports this is either an explicit + namespace (with types defined by the OS: netns, Cgroup, IPC, + pid, etc. on linux) or failing that the sub-directory in the + filesys in which socket/shm files are located *under*. + + ''' + ... + + @classmethod + def from_addr(cls, addr: UnwrappedAddress) -> Address: + ... + + def unwrap(self) -> UnwrappedAddress: + ''' + Deliver the underying minimum field set in + a primitive python data type-structure. + ''' + ... + + @classmethod + def get_random( + cls, + current_actor: Actor, + bindspace: str|None = None, + ) -> Address: + ... + + # TODO, this should be something like a `.get_def_registar_addr()` + # or similar since, + # - it should be a **host singleton** (not root/tree singleton) + # - we **only need this value** when one isn't provided to the + # runtime at boot and we want to implicitly provide a host-wide + # registrar. + # - each rooted-actor-tree should likely have its own + # micro-registry (likely the root being it), also see + @classmethod + def get_root(cls) -> Address: + ... + + def __repr__(self) -> str: + ... + + def __eq__(self, other) -> bool: + ... + + async def open_listener( + self, + **kwargs, + ) -> SocketListener: + ... + + async def close_listener(self): + ... + + +_address_types: bidict[str, Type[Address]] = { + 'tcp': TCPAddress, + 'uds': UDSAddress +} + + +# TODO! really these are discovery sys default addrs ONLY useful for +# when none is provided to a root actor on first boot. 
+_default_lo_addrs: dict[ + str, + UnwrappedAddress +] = { + 'tcp': TCPAddress.get_root().unwrap(), + 'uds': UDSAddress.get_root().unwrap(), +} + + +def get_address_cls(name: str) -> Type[Address]: + return _address_types[name] + + +def is_wrapped_addr(addr: any) -> bool: + return type(addr) in _address_types.values() + + +def mk_uuid() -> str: + ''' + Encapsulate creation of a uuid4 as `str` as used + for creating `Actor.uid: tuple[str, str]` and/or + `.msg.types.Aid`. + + ''' + return str(uuid4()) + + +def wrap_address( + addr: UnwrappedAddress +) -> Address: + ''' + Wrap an `UnwrappedAddress` as an `Address`-type based + on matching builtin python data-structures which we adhoc + use for each. + + XXX NOTE, careful care must be placed to ensure + `UnwrappedAddress` cases are **definitely unique** otherwise the + wrong transport backend may be loaded and will break many + low-level things in our runtime in a not-fun-to-debug way! + + XD + + ''' + if is_wrapped_addr(addr): + return addr + + cls: Type|None = None + # if 'sock' in addr[0]: + # import pdbp; pdbp.set_trace() + match addr: + + # classic network socket-address as tuple/list + case ( + (str(), int()) + | + [str(), int()] + ): + cls = TCPAddress + + case ( + # (str()|Path(), str()|Path()), + # ^TODO? uhh why doesn't this work!? + + (_, filename) + ) if type(filename) is str: + cls = UDSAddress + + # likely an unset UDS or TCP reg address as defaulted in + # `_state._runtime_vars['_root_mailbox']` + # + # TODO? figure out when/if we even need this? + case ( + None + | + [None, None] + ): + cls: Type[Address] = get_address_cls(_def_tpt_proto) + addr: UnwrappedAddress = cls.get_root().unwrap() + + case _: + # import pdbp; pdbp.set_trace() + raise TypeError( + f'Can not wrap unwrapped-address ??\n' + f'type(addr): {type(addr)!r}\n' + f'addr: {addr!r}\n' + ) + + return cls.from_addr(addr) + + +def default_lo_addrs( + transports: list[str], +) -> list[Type[Address]]: + ''' + Return the default, host-singleton, registry address + for an input transport key set. 
+ + ''' + return [ + _default_lo_addrs[transport] + for transport in transports + ] diff --git a/tractor/_child.py b/tractor/_child.py index 4226ae90..d2f03f55 100644 --- a/tractor/_child.py +++ b/tractor/_child.py @@ -31,8 +31,12 @@ def parse_uid(arg): return str(name), str(uuid) # ensures str encoding def parse_ipaddr(arg): - host, port = literal_eval(arg) - return (str(host), int(port)) + try: + return literal_eval(arg) + + except (ValueError, SyntaxError): + # UDS: try to interpret as a straight up str + return arg if __name__ == "__main__": @@ -46,8 +50,8 @@ if __name__ == "__main__": args = parser.parse_args() subactor = Actor( - args.uid[0], - uid=args.uid[1], + name=args.uid[0], + uuid=args.uid[1], loglevel=args.loglevel, spawn_method="trio" ) diff --git a/tractor/_context.py b/tractor/_context.py index 201e920a..6d817d58 100644 --- a/tractor/_context.py +++ b/tractor/_context.py @@ -89,7 +89,7 @@ from .msg import ( pretty_struct, _ops as msgops, ) -from ._ipc import ( +from .ipc import ( Channel, ) from ._streaming import ( @@ -105,7 +105,7 @@ from ._state import ( if TYPE_CHECKING: from ._portal import Portal from ._runtime import Actor - from ._ipc import MsgTransport + from .ipc._transport import MsgTransport from .devx._frame_stack import ( CallerInfo, ) @@ -292,7 +292,7 @@ class Context: # - `._runtime._invoke()` will check this flag before engaging # the crash handler REPL in such cases where the "callee" # raises the cancellation, - # - `.devx._debug.lock_stdio_for_peer()` will set it to `False` if + # - `.devx.debug.lock_stdio_for_peer()` will set it to `False` if # the global tty-lock has been configured to filter out some # actors from being able to acquire the debugger lock. _enter_debugger_on_cancel: bool = True @@ -366,7 +366,7 @@ class Context: # f' ---\n' f' |_ipc: {self.dst_maddr}\n' # f' dst_maddr{ds}{self.dst_maddr}\n' - f" uid{ds}'{self.chan.uid}'\n" + f" uid{ds}'{self.chan.aid}'\n" f" cid{ds}'{self.cid}'\n" # f' ---\n' f'\n' @@ -859,19 +859,10 @@ class Context: @property def dst_maddr(self) -> str: chan: Channel = self.chan - dst_addr, dst_port = chan.raddr trans: MsgTransport = chan.transport # cid: str = self.cid # cid_head, cid_tail = cid[:6], cid[-6:] - return ( - f'/ipv4/{dst_addr}' - f'/{trans.name_key}/{dst_port}' - # f'/{self.chan.uid[0]}' - # f'/{self.cid}' - - # f'/cid={cid_head}..{cid_tail}' - # TODO: ? not use this ^ right ? - ) + return trans.maddr dmaddr = dst_maddr @@ -954,10 +945,10 @@ class Context: reminfo: str = ( # ' =>\n' # f'Context.cancel() => {self.chan.uid}\n' + f'\n' f'c)=> {self.chan.uid}\n' - # f'{self.chan.uid}\n' - f' |_ @{self.dst_maddr}\n' - f' >> {self.repr_rpc}\n' + f' |_[{self.dst_maddr}\n' + f' >>{self.repr_rpc}\n' # f' >> {self._nsf}() -> {codec}[dict]:\n\n' # TODO: pull msg-type from spec re #320 ) @@ -1078,9 +1069,25 @@ class Context: |RemoteActorError # stream overrun caused and ignored by us ): ''' - Maybe raise a remote error depending on the type of error - and *who* (i.e. which task from which actor) requested - a cancellation (if any). + Maybe raise a remote error depending on the type of error and + *who*, i.e. which side of the task pair across actors, + requested a cancellation (if any). + + Depending on the input config-params suppress raising + certain remote excs: + + - if `remote_error: ContextCancelled` (ctxc) AND this side's + task is the "requester", it at somem point called + `Context.cancel()`, then the peer's ctxc is treated + as a "cancel ack". 
+ + |_ this behaves exactly like how `trio.Nursery.cancel_scope` + absorbs any `BaseExceptionGroup[trio.Cancelled]` wherein the + owning parent task never will raise a `trio.Cancelled` + if `CancelScope.cancel_called == True`. + + - `remote_error: StreamOverrrun` (overrun) AND + `raise_overrun_from_self` is set. ''' __tracebackhide__: bool = hide_tb @@ -1122,18 +1129,19 @@ class Context: # for this ^, NO right? ) or ( - # NOTE: whenever this context is the cause of an - # overrun on the remote side (aka we sent msgs too - # fast that the remote task was overrun according - # to `MsgStream` buffer settings) AND the caller - # has requested to not raise overruns this side - # caused, we also silently absorb any remotely - # boxed `StreamOverrun`. This is mostly useful for - # supressing such faults during - # cancellation/error/final-result handling inside - # `msg._ops.drain_to_final_msg()` such that we do not - # raise such errors particularly in the case where + # NOTE: whenever this side is the cause of an + # overrun on the peer side, i.e. we sent msgs too + # fast and the peer task was overrun according + # to `MsgStream` buffer settings, AND this was + # called with `raise_overrun_from_self=True` (the + # default), silently absorb any `StreamOverrun`. + # + # XXX, this is namely useful for supressing such faults + # during cancellation/error/final-result handling inside + # `.msg._ops.drain_to_final_msg()` such that we do not + # raise during a cancellation-request, i.e. when # `._cancel_called == True`. + # not raise_overrun_from_self and isinstance(remote_error, RemoteActorError) and remote_error.boxed_type is StreamOverrun @@ -1243,8 +1251,8 @@ class Context: # ?XXX, should already be set in `._deliver_msg()` right? if self._outcome_msg is not Unresolved: - # from .devx import _debug - # await _debug.pause() + # from .devx import debug + # await debug.pause() assert self._outcome_msg is outcome_msg else: self._outcome_msg = outcome_msg @@ -2179,7 +2187,7 @@ async def open_context_from_portal( # debugging the tractor-runtime itself using it's # own `.devx.` tooling! # - # await _debug.pause() + # await debug.pause() # CASE 2: context was cancelled by local task calling # `.cancel()`, we don't raise and the exit block should @@ -2246,7 +2254,7 @@ async def open_context_from_portal( # NOTE: `Context.cancel()` is conversely NEVER CALLED in # the `ContextCancelled` "self cancellation absorbed" case # handled in the block above ^^^ !! - # await _debug.pause() + # await debug.pause() # log.cancel( match scope_err: case trio.Cancelled: @@ -2261,11 +2269,11 @@ async def open_context_from_portal( ) if debug_mode(): - # async with _debug.acquire_debug_lock(portal.actor.uid): + # async with debug.acquire_debug_lock(portal.actor.uid): # pass # TODO: factor ^ into below for non-root cases? # - from .devx._debug import maybe_wait_for_debugger + from .devx.debug import maybe_wait_for_debugger was_acquired: bool = await maybe_wait_for_debugger( # header_msg=( # 'Delaying `ctx.cancel()` until debug lock ' @@ -2328,8 +2336,8 @@ async def open_context_from_portal( raise # yes this worx! - # from .devx import _debug - # await _debug.pause() + # from .devx import debug + # await debug.pause() # an exception type boxed in a `RemoteActorError` # is returned (meaning it was obvi not raised) @@ -2364,7 +2372,7 @@ async def open_context_from_portal( # where the root is waiting on the lock to clear but the # child has already cleared it and clobbered IPC. 
if debug_mode(): - from .devx._debug import maybe_wait_for_debugger + from .devx.debug import maybe_wait_for_debugger await maybe_wait_for_debugger() # though it should be impossible for any tasks diff --git a/tractor/_discovery.py b/tractor/_discovery.py index a681c63b..f6b3b585 100644 --- a/tractor/_discovery.py +++ b/tractor/_discovery.py @@ -29,7 +29,12 @@ from contextlib import asynccontextmanager as acm from tractor.log import get_logger from .trionics import gather_contexts -from ._ipc import _connect_chan, Channel +from .ipc import _connect_chan, Channel +from ._addr import ( + UnwrappedAddress, + Address, + wrap_address +) from ._portal import ( Portal, open_portal, @@ -38,6 +43,7 @@ from ._portal import ( from ._state import ( current_actor, _runtime_vars, + _def_tpt_proto, ) if TYPE_CHECKING: @@ -49,9 +55,7 @@ log = get_logger(__name__) @acm async def get_registry( - host: str, - port: int, - + addr: UnwrappedAddress|None = None, ) -> AsyncGenerator[ Portal | LocalPortal | None, None, @@ -69,13 +73,15 @@ async def get_registry( # (likely a re-entrant call from the arbiter actor) yield LocalPortal( actor, - Channel((host, port)) + Channel(transport=None) + # ^XXX, we DO NOT actually provide nor connect an + # underlying transport since this is merely an API shim. ) else: # TODO: try to look pre-existing connection from - # `Actor._peers` and use it instead? + # `Server._peers` and use it instead? async with ( - _connect_chan(host, port) as chan, + _connect_chan(addr) as chan, open_portal(chan) as regstr_ptl, ): yield regstr_ptl @@ -89,11 +95,10 @@ async def get_root( # TODO: rename mailbox to `_root_maddr` when we finally # add and impl libp2p multi-addrs? - host, port = _runtime_vars['_root_mailbox'] - assert host is not None + addr = _runtime_vars['_root_mailbox'] async with ( - _connect_chan(host, port) as chan, + _connect_chan(addr) as chan, open_portal(chan, **kwargs) as portal, ): yield portal @@ -106,17 +111,23 @@ def get_peer_by_name( ) -> list[Channel]|None: # at least 1 ''' Scan for an existing connection (set) to a named actor - and return any channels from `Actor._peers`. + and return any channels from `Server._peers: dict`. This is an optimization method over querying the registrar for the same info. ''' actor: Actor = current_actor() - to_scan: dict[tuple, list[Channel]] = actor._peers.copy() - pchan: Channel|None = actor._parent_chan - if pchan: - to_scan[pchan.uid].append(pchan) + to_scan: dict[tuple, list[Channel]] = actor.ipc_server._peers.copy() + + # TODO: is this ever needed? 
creates a duplicate channel on actor._peers + # when multiple find_actor calls are made to same actor from a single ctx + # which causes actor exit to hang waiting forever on + # `actor._no_more_peers.wait()` in `_runtime.async_main` + + # pchan: Channel|None = actor._parent_chan + # if pchan and pchan.uid not in to_scan: + # to_scan[pchan.uid].append(pchan) for aid, chans in to_scan.items(): _, peer_name = aid @@ -134,10 +145,10 @@ def get_peer_by_name( @acm async def query_actor( name: str, - regaddr: tuple[str, int]|None = None, + regaddr: UnwrappedAddress|None = None, ) -> AsyncGenerator[ - tuple[str, int]|None, + UnwrappedAddress|None, None, ]: ''' @@ -163,31 +174,31 @@ async def query_actor( return reg_portal: Portal - regaddr: tuple[str, int] = regaddr or actor.reg_addrs[0] - async with get_registry(*regaddr) as reg_portal: + regaddr: Address = wrap_address(regaddr) or actor.reg_addrs[0] + async with get_registry(regaddr) as reg_portal: # TODO: return portals to all available actors - for now # just the last one that registered - sockaddr: tuple[str, int] = await reg_portal.run_from_ns( + addr: UnwrappedAddress = await reg_portal.run_from_ns( 'self', 'find_actor', name=name, ) - yield sockaddr + yield addr @acm async def maybe_open_portal( - addr: tuple[str, int], + addr: UnwrappedAddress, name: str, ): async with query_actor( name=name, regaddr=addr, - ) as sockaddr: + ) as addr: pass - if sockaddr: - async with _connect_chan(*sockaddr) as chan: + if addr: + async with _connect_chan(addr) as chan: async with open_portal(chan) as portal: yield portal else: @@ -197,7 +208,8 @@ async def maybe_open_portal( @acm async def find_actor( name: str, - registry_addrs: list[tuple[str, int]]|None = None, + registry_addrs: list[UnwrappedAddress]|None = None, + enable_transports: list[str] = [_def_tpt_proto], only_first: bool = True, raise_on_none: bool = False, @@ -224,15 +236,15 @@ async def find_actor( # XXX NOTE: make sure to dynamically read the value on # every call since something may change it globally (eg. # like in our discovery test suite)! - from . import _root + from ._addr import default_lo_addrs registry_addrs = ( _runtime_vars['_registry_addrs'] or - _root._default_lo_addrs + default_lo_addrs(enable_transports) ) maybe_portals: list[ - AsyncContextManager[tuple[str, int]] + AsyncContextManager[UnwrappedAddress] ] = list( maybe_open_portal( addr=addr, @@ -274,7 +286,7 @@ async def find_actor( @acm async def wait_for_actor( name: str, - registry_addr: tuple[str, int] | None = None, + registry_addr: UnwrappedAddress | None = None, ) -> AsyncGenerator[Portal, None]: ''' @@ -291,7 +303,7 @@ async def wait_for_actor( yield peer_portal return - regaddr: tuple[str, int] = ( + regaddr: UnwrappedAddress = ( registry_addr or actor.reg_addrs[0] @@ -299,8 +311,8 @@ async def wait_for_actor( # TODO: use `.trionics.gather_contexts()` like # above in `find_actor()` as well? reg_portal: Portal - async with get_registry(*regaddr) as reg_portal: - sockaddrs = await reg_portal.run_from_ns( + async with get_registry(regaddr) as reg_portal: + addrs = await reg_portal.run_from_ns( 'self', 'wait_for_actor', name=name, @@ -308,8 +320,8 @@ async def wait_for_actor( # get latest registered addr by default? # TODO: offer multi-portal yields in multi-homed case? 
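# --------------------------------------------------------------------
# (editor's sketch, not part of the patch) Rough usage of the reworked
# discovery API above: registry addresses are now opaque
# `UnwrappedAddress` values and the transport proto can be selected via
# the new `enable_transports` param. The service name is made up,
# `find_actor` is assumed importable as below, and with no such peer
# registered the yielded portal is simply `None`.
import trio
import tractor
from tractor._discovery import find_actor

async def lookup() -> None:
    async with (
        # a local runtime must exist for discovery to run
        tractor.open_root_actor(
            name='searcher',
            enable_transports=['tcp'],  # new param from this patch
        ),
        find_actor(
            'some-service',
            only_first=True,
            raise_on_none=False,
        ) as portal,
    ):
        if portal is None:
            print('no such actor registered')
        else:
            print(f'found peer over {portal.channel}')

if __name__ == '__main__':
    trio.run(lookup)
# --------------------------------------------------------------------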
- sockaddr: tuple[str, int] = sockaddrs[-1] + addr: UnwrappedAddress = addrs[-1] - async with _connect_chan(*sockaddr) as chan: + async with _connect_chan(addr) as chan: async with open_portal(chan) as portal: yield portal diff --git a/tractor/_entry.py b/tractor/_entry.py index 8156d25f..400ce66d 100644 --- a/tractor/_entry.py +++ b/tractor/_entry.py @@ -22,7 +22,6 @@ from __future__ import annotations from functools import partial import multiprocessing as mp import os -import textwrap from typing import ( Any, TYPE_CHECKING, @@ -35,8 +34,12 @@ from .log import ( get_logger, ) from . import _state -from .devx import _debug +from .devx import ( + _frame_stack, + pformat, +) from .to_asyncio import run_as_asyncio_guest +from ._addr import UnwrappedAddress from ._runtime import ( async_main, Actor, @@ -52,10 +55,10 @@ log = get_logger(__name__) def _mp_main( actor: Actor, - accept_addrs: list[tuple[str, int]], + accept_addrs: list[UnwrappedAddress], forkserver_info: tuple[Any, Any, Any, Any, Any], start_method: SpawnMethodKey, - parent_addr: tuple[str, int] | None = None, + parent_addr: UnwrappedAddress | None = None, infect_asyncio: bool = False, ) -> None: @@ -102,111 +105,10 @@ def _mp_main( ) -# TODO: move this func to some kinda `.devx._conc_lang.py` eventually -# as we work out our multi-domain state-flow-syntax! -def nest_from_op( - input_op: str, - # - # ?TODO? an idea for a syntax to the state of concurrent systems - # as a "3-domain" (execution, scope, storage) model and using - # a minimal ascii/utf-8 operator-set. - # - # try not to take any of this seriously yet XD - # - # > is a "play operator" indicating (CPU bound) - # exec/work/ops required at the "lowest level computing" - # - # execution primititves (tasks, threads, actors..) denote their - # lifetime with '(' and ')' since parentheses normally are used - # in many langs to denote function calls. - # - # starting = ( - # >( opening/starting; beginning of the thread-of-exec (toe?) - # (> opened/started, (finished spawning toe) - # |_ repr of toe, in py these look like - # - # >) closing/exiting/stopping, - # )> closed/exited/stopped, - # |_ - # [OR <), )< ?? ] - # - # ending = ) - # >c) cancelling to close/exit - # c)> cancelled (caused close), OR? - # |_ - # OR maybe "x) erroring to eventuall exit - # x)> errored and terminated - # |_ - # - # scopes: supers/nurseries, IPC-ctxs, sessions, perms, etc. - # >{ opening - # {> opened - # }> closed - # >} closing - # - # storage: like queues, shm-buffers, files, etc.. - # >[ opening - # [> opened - # |_ - # - # >] closing - # ]> closed - - # IPC ops: channels, transports, msging - # => req msg - # <= resp msg - # <=> 2-way streaming (of msgs) - # <- recv 1 msg - # -> send 1 msg - # - # TODO: still not sure on R/L-HS approach..? - # =>( send-req to exec start (task, actor, thread..) - # (<= recv-req to ^ - # - # (<= recv-req ^ - # <=( recv-resp opened remote exec primitive - # <=) recv-resp closed - # - # )<=c req to stop due to cancel - # c=>) req to stop due to cancel - # - # =>{ recv-req to open - # <={ send-status that it closed - - tree_str: str, - - # NOTE: so move back-from-the-left of the `input_op` by - # this amount. - back_from_op: int = 0, -) -> str: - ''' - Depth-increment the input (presumably hierarchy/supervision) - input "tree string" below the provided `input_op` execution - operator, so injecting a `"\n|_{input_op}\n"`and indenting the - `tree_str` to nest content aligned with the ops last char. 
- - ''' - return ( - f'{input_op}\n' - + - textwrap.indent( - tree_str, - prefix=( - len(input_op) - - - (back_from_op + 1) - ) * ' ', - ) - ) - - def _trio_main( actor: Actor, *, - parent_addr: tuple[str, int] | None = None, + parent_addr: UnwrappedAddress|None = None, infect_asyncio: bool = False, ) -> None: @@ -214,7 +116,7 @@ def _trio_main( Entry point for a `trio_run_in_process` subactor. ''' - _debug.hide_runtime_frames() + _frame_stack.hide_runtime_frames() _state._current_actor = actor trio_main = partial( @@ -235,7 +137,7 @@ def _trio_main( log.info( 'Starting new `trio` subactor:\n' + - nest_from_op( + pformat.nest_from_op( input_op='>(', # see syntax ideas above tree_str=actor_info, back_from_op=2, # since "complete" @@ -245,7 +147,7 @@ def _trio_main( exit_status: str = ( 'Subactor exited\n' + - nest_from_op( + pformat.nest_from_op( input_op=')>', # like a "closed-to-play"-icon from super perspective tree_str=actor_info, back_from_op=1, @@ -263,7 +165,7 @@ def _trio_main( exit_status: str = ( 'Actor received KBI (aka an OS-cancel)\n' + - nest_from_op( + pformat.nest_from_op( input_op='c)>', # closed due to cancel (see above) tree_str=actor_info, ) @@ -273,7 +175,7 @@ def _trio_main( exit_status: str = ( 'Main actor task exited due to crash?\n' + - nest_from_op( + pformat.nest_from_op( input_op='x)>', # closed by error tree_str=actor_info, ) diff --git a/tractor/_exceptions.py b/tractor/_exceptions.py index f9e18e18..3561c7c6 100644 --- a/tractor/_exceptions.py +++ b/tractor/_exceptions.py @@ -23,7 +23,6 @@ import builtins import importlib from pprint import pformat from pdb import bdb -import sys from types import ( TracebackType, ) @@ -65,15 +64,29 @@ if TYPE_CHECKING: from ._context import Context from .log import StackLevelAdapter from ._stream import MsgStream - from ._ipc import Channel + from .ipc import Channel log = get_logger('tractor') _this_mod = importlib.import_module(__name__) -class ActorFailure(Exception): - "General actor failure" +class RuntimeFailure(RuntimeError): + ''' + General `Actor`-runtime failure due to, + + - a bad runtime-env, + - falied spawning (bad input to process), + - API usage. + + ''' + + +class ActorFailure(RuntimeFailure): + ''' + `Actor` failed to boot before/after spawn + + ''' class InternalError(RuntimeError): @@ -126,6 +139,12 @@ class TrioTaskExited(Exception): ''' +class DebugRequestError(RuntimeError): + ''' + Failed to request stdio lock from root actor! + + ''' + # NOTE: more or less should be close to these: # 'boxed_type', # 'src_type', @@ -191,6 +210,8 @@ def get_err_type(type_name: str) -> BaseException|None: ): return type_ref + return None + def pack_from_raise( local_err: ( @@ -521,7 +542,6 @@ class RemoteActorError(Exception): if val: _repr += f'{key}={val_str}{end_char}' - return _repr def reprol(self) -> str: @@ -600,56 +620,9 @@ class RemoteActorError(Exception): the type name is already implicitly shown by python). ''' - header: str = '' - body: str = '' - message: str = '' - - # XXX when the currently raised exception is this instance, - # we do not ever use the "type header" style repr. 
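# --------------------------------------------------------------------
# (editor's sketch, not part of the patch) The `nest_from_op()` helper
# being relocated above, restated standalone as a reference for what
# `tractor.devx.pformat.nest_from_op()` is expected to render in the
# `_trio_main()` start/exit logs; the moved version may have since
# grown extra params.
import textwrap

def nest_from_op(
    input_op: str,
    tree_str: str,
    back_from_op: int = 0,
) -> str:
    # prepend the "op" then indent the tree repr so it nests under
    # the op's last char (shifted left by `back_from_op`).
    return (
        f'{input_op}\n'
        +
        textwrap.indent(
            tree_str,
            prefix=(
                len(input_op) - (back_from_op + 1)
            ) * ' ',
        )
    )

# e.g. roughly what `_trio_main()` logs when a subactor starts:
print(
    'Starting new `trio` subactor:\n'
    +
    nest_from_op(
        input_op='>(',  # "opening/starting" op
        tree_str="Actor('spawnee', uid='...')",
    )
)
# --------------------------------------------------------------------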
- is_being_raised: bool = False - if ( - (exc := sys.exception()) - and - exc is self - ): - is_being_raised: bool = True - - with_type_header: bool = ( - with_type_header - and - not is_being_raised - ) - - # style - if with_type_header: - header: str = f'<{type(self).__name__}(' - - if message := self._message: - - # split off the first line so, if needed, it isn't - # indented the same like the "boxed content" which - # since there is no `.tb_str` is just the `.message`. - lines: list[str] = message.splitlines() - first: str = lines[0] - message: str = message.removeprefix(first) - - # with a type-style header we, - # - have no special message "first line" extraction/handling - # - place the message a space in from the header: - # `MsgTypeError( ..` - # ^-here - # - indent the `.message` inside the type body. - if with_type_header: - first = f' {first} )>' - - message: str = textwrap.indent( - message, - prefix=' '*2, - ) - message: str = first + message - # IFF there is an embedded traceback-str we always # draw the ascii-box around it. + body: str = '' if tb_str := self.tb_str: fields: str = self._mk_fields_str( _body_fields @@ -670,21 +643,15 @@ class RemoteActorError(Exception): boxer_header=self.relay_uid, ) - tail = '' - if ( - with_type_header - and not message - ): - tail: str = '>' - - return ( - header - + - message - + - f'{body}' - + - tail + # !TODO, it'd be nice to import these top level without + # cycles! + from tractor.devx.pformat import ( + pformat_exc, + ) + return pformat_exc( + exc=self, + with_type_header=with_type_header, + body=body, ) __repr__ = pformat @@ -962,7 +929,7 @@ class StreamOverrun( ''' -class TransportClosed(trio.BrokenResourceError): +class TransportClosed(Exception): ''' IPC transport (protocol) connection was closed or broke and indicates that the wrapping communication `Channel` can no longer @@ -973,24 +940,39 @@ class TransportClosed(trio.BrokenResourceError): self, message: str, loglevel: str = 'transport', - cause: BaseException|None = None, + src_exc: Exception|None = None, raise_on_report: bool = False, ) -> None: self.message: str = message - self._loglevel = loglevel + self._loglevel: str = loglevel super().__init__(message) - if cause is not None: - self.__cause__ = cause + self._src_exc = src_exc + # set the cause manually if not already set by python + if ( + src_exc is not None + and + not self.__cause__ + ): + self.__cause__ = src_exc # flag to toggle whether the msg loop should raise # the exc in its `TransportClosed` handler block. self._raise_on_report = raise_on_report + @property + def src_exc(self) -> Exception: + return ( + self.__cause__ + or + self._src_exc + ) + def report_n_maybe_raise( self, message: str|None = None, + hide_tb: bool = True, ) -> None: ''' @@ -998,9 +980,10 @@ class TransportClosed(trio.BrokenResourceError): for this error. ''' + __tracebackhide__: bool = hide_tb message: str = message or self.message # when a cause is set, slap it onto the log emission. 
- if cause := self.__cause__: + if cause := self.src_exc: cause_tb_str: str = ''.join( traceback.format_tb(cause.__traceback__) ) @@ -1009,13 +992,86 @@ class TransportClosed(trio.BrokenResourceError): f' {cause}\n' # exc repr ) - getattr(log, self._loglevel)(message) + getattr( + log, + self._loglevel + )(message) # some errors we want to blow up from # inside the RPC msg loop if self._raise_on_report: raise self from cause + @classmethod + def repr_src_exc( + self, + src_exc: Exception|None = None, + ) -> str: + + if src_exc is None: + return '' + + src_msg: tuple[str] = src_exc.args + src_exc_repr: str = ( + f'{type(src_exc).__name__}[ {src_msg} ]' + ) + return src_exc_repr + + def pformat(self) -> str: + from tractor.devx.pformat import ( + pformat_exc, + ) + return pformat_exc( + exc=self, + ) + + # delegate to `str`-ified pformat + __repr__ = pformat + + @classmethod + def from_src_exc( + cls, + src_exc: ( + Exception| + trio.ClosedResource| + trio.BrokenResourceError + ), + message: str, + body: str = '', + **init_kws, + ) -> TransportClosed: + ''' + Convenience constructor for creation from an underlying + `trio`-sourced async-resource/chan/stream error. + + Embeds the original `src_exc`'s repr within the + `Exception.args` via a first-line-in-`.message`-put-in-header + pre-processing and allows inserting additional content beyond + the main message via a `body: str`. + + ''' + repr_src_exc: str = cls.repr_src_exc( + src_exc, + ) + next_line: str = f' src_exc: {repr_src_exc}\n' + if body: + body: str = textwrap.indent( + body, + prefix=' '*2, + ) + + return TransportClosed( + message=( + message + + + next_line + + + body + ), + src_exc=src_exc, + **init_kws, + ) + class NoResult(RuntimeError): "No final result is expected for this actor" diff --git a/tractor/_ipc.py b/tractor/_ipc.py deleted file mode 100644 index 83186147..00000000 --- a/tractor/_ipc.py +++ /dev/null @@ -1,820 +0,0 @@ -# tractor: structured concurrent "actors". -# Copyright 2018-eternity Tyler Goodlet. - -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. - -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . 
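# --------------------------------------------------------------------
# (editor's sketch, not part of the patch) Minimal construction of the
# reworked `TransportClosed` via the new `.from_src_exc()` helper added
# above; the message/body text is illustrative only.
import trio
from tractor._exceptions import TransportClosed

def wrap_closed_stream(
    src: trio.BrokenResourceError,
) -> TransportClosed:
    return TransportClosed.from_src_exc(
        src_exc=src,
        message='IPC transport was closed by the peer\n',
        body=' |_<Channel repr here>\n',
        loglevel='transport',
    )

# the original `trio`-sourced error remains reachable via `.src_exc`
# (or the std `.__cause__` which is set when not already chained):
exc = wrap_closed_stream(trio.BrokenResourceError('peer reset'))
assert isinstance(exc.src_exc, trio.BrokenResourceError)
# --------------------------------------------------------------------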
- -""" -Inter-process comms abstractions - -""" -from __future__ import annotations -from collections.abc import ( - AsyncGenerator, - AsyncIterator, -) -from contextlib import ( - asynccontextmanager as acm, - contextmanager as cm, -) -import platform -from pprint import pformat -import struct -import typing -from typing import ( - Any, - Callable, - runtime_checkable, - Protocol, - Type, - TypeVar, -) - -import msgspec -from tricycle import BufferedReceiveStream -import trio - -from tractor.log import get_logger -from tractor._exceptions import ( - MsgTypeError, - pack_from_raise, - TransportClosed, - _mk_send_mte, - _mk_recv_mte, -) -from tractor.msg import ( - _ctxvar_MsgCodec, - # _codec, XXX see `self._codec` sanity/debug checks - MsgCodec, - types as msgtypes, - pretty_struct, -) - -log = get_logger(__name__) - -_is_windows = platform.system() == 'Windows' - - -def get_stream_addrs( - stream: trio.SocketStream -) -> tuple[ - tuple[str, int], # local - tuple[str, int], # remote -]: - ''' - Return the `trio` streaming transport prot's socket-addrs for - both the local and remote sides as a pair. - - ''' - # rn, should both be IP sockets - lsockname = stream.socket.getsockname() - rsockname = stream.socket.getpeername() - return ( - tuple(lsockname[:2]), - tuple(rsockname[:2]), - ) - - -# from tractor.msg.types import MsgType -# ?TODO? this should be our `Union[*msgtypes.__spec__]` alias now right..? -# => BLEH, except can't bc prots must inherit typevar or param-spec -# vars.. -MsgType = TypeVar('MsgType') - - -# TODO: break up this mod into a subpkg so we can start adding new -# backends and move this type stuff into a dedicated file.. Bo -# -@runtime_checkable -class MsgTransport(Protocol[MsgType]): -# -# ^-TODO-^ consider using a generic def and indexing with our -# eventual msg definition/types? -# - https://docs.python.org/3/library/typing.html#typing.Protocol - - stream: trio.SocketStream - drained: list[MsgType] - - def __init__(self, stream: trio.SocketStream) -> None: - ... - - # XXX: should this instead be called `.sendall()`? - async def send(self, msg: MsgType) -> None: - ... - - async def recv(self) -> MsgType: - ... - - def __aiter__(self) -> MsgType: - ... - - def connected(self) -> bool: - ... - - # defining this sync otherwise it causes a mypy error because it - # can't figure out it's a generator i guess?..? - def drain(self) -> AsyncIterator[dict]: - ... - - @property - def laddr(self) -> tuple[str, int]: - ... - - @property - def raddr(self) -> tuple[str, int]: - ... - - -# TODO: typing oddity.. not sure why we have to inherit here, but it -# seems to be an issue with `get_msg_transport()` returning -# a `Type[Protocol]`; probably should make a `mypy` issue? -class MsgpackTCPStream(MsgTransport): - ''' - A ``trio.SocketStream`` delivering ``msgpack`` formatted data - using the ``msgspec`` codec lib. - - ''' - layer_key: int = 4 - name_key: str = 'tcp' - - # TODO: better naming for this? - # -[ ] check how libp2p does naming for such things? - codec_key: str = 'msgpack' - - def __init__( - self, - stream: trio.SocketStream, - prefix_size: int = 4, - - # XXX optionally provided codec pair for `msgspec`: - # https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types - # - # TODO: define this as a `Codec` struct which can be - # overriden dynamically by the application/runtime? 
- codec: tuple[ - Callable[[Any], Any]|None, # coder - Callable[[type, Any], Any]|None, # decoder - ]|None = None, - - ) -> None: - - self.stream = stream - assert self.stream.socket - - # should both be IP sockets - self._laddr, self._raddr = get_stream_addrs(stream) - - # create read loop instance - self._aiter_pkts = self._iter_packets() - self._send_lock = trio.StrictFIFOLock() - - # public i guess? - self.drained: list[dict] = [] - - self.recv_stream = BufferedReceiveStream( - transport_stream=stream - ) - self.prefix_size = prefix_size - - # allow for custom IPC msg interchange format - # dynamic override Bo - self._task = trio.lowlevel.current_task() - - # XXX for ctxvar debug only! - # self._codec: MsgCodec = ( - # codec - # or - # _codec._ctxvar_MsgCodec.get() - # ) - - async def _iter_packets(self) -> AsyncGenerator[dict, None]: - ''' - Yield `bytes`-blob decoded packets from the underlying TCP - stream using the current task's `MsgCodec`. - - This is a streaming routine implemented as an async generator - func (which was the original design, but could be changed?) - and is allocated by a `.__call__()` inside `.__init__()` where - it is assigned to the `._aiter_pkts` attr. - - ''' - decodes_failed: int = 0 - - while True: - try: - header: bytes = await self.recv_stream.receive_exactly(4) - except ( - ValueError, - ConnectionResetError, - - # not sure entirely why we need this but without it we - # seem to be getting racy failures here on - # arbiter/registry name subs.. - trio.BrokenResourceError, - - ) as trans_err: - - loglevel = 'transport' - match trans_err: - # case ( - # ConnectionResetError() - # ): - # loglevel = 'transport' - - # peer actor (graceful??) TCP EOF but `tricycle` - # seems to raise a 0-bytes-read? - case ValueError() if ( - 'unclean EOF' in trans_err.args[0] - ): - pass - - # peer actor (task) prolly shutdown quickly due - # to cancellation - case trio.BrokenResourceError() if ( - 'Connection reset by peer' in trans_err.args[0] - ): - pass - - # unless the disconnect condition falls under "a - # normal operation breakage" we usualy console warn - # about it. - case _: - loglevel: str = 'warning' - - - raise TransportClosed( - message=( - f'IPC transport already closed by peer\n' - f'x]> {type(trans_err)}\n' - f' |_{self}\n' - ), - loglevel=loglevel, - ) from trans_err - - # XXX definitely can happen if transport is closed - # manually by another `trio.lowlevel.Task` in the - # same actor; we use this in some simulated fault - # testing for ex, but generally should never happen - # under normal operation! - # - # NOTE: as such we always re-raise this error from the - # RPC msg loop! - except trio.ClosedResourceError as closure_err: - raise TransportClosed( - message=( - f'IPC transport already manually closed locally?\n' - f'x]> {type(closure_err)} \n' - f' |_{self}\n' - ), - loglevel='error', - raise_on_report=( - closure_err.args[0] == 'another task closed this fd' - or - closure_err.args[0] in ['another task closed this fd'] - ), - ) from closure_err - - # graceful TCP EOF disconnect - if header == b'': - raise TransportClosed( - message=( - f'IPC transport already gracefully closed\n' - f']>\n' - f' |_{self}\n' - ), - loglevel='transport', - # cause=??? # handy or no? - ) - - size: int - size, = struct.unpack(" None: - ''' - Send a msgpack encoded py-object-blob-as-msg over TCP. 
- - If `strict_types == True` then a `MsgTypeError` will be raised on any - invalid msg type - - ''' - __tracebackhide__: bool = hide_tb - - # XXX see `trio._sync.AsyncContextManagerMixin` for details - # on the `.acquire()`/`.release()` sequencing.. - async with self._send_lock: - - # NOTE: lookup the `trio.Task.context`'s var for - # the current `MsgCodec`. - codec: MsgCodec = _ctxvar_MsgCodec.get() - - # XXX for ctxvar debug only! - # if self._codec.pld_spec != codec.pld_spec: - # self._codec = codec - # log.runtime( - # f'Using new codec in {self}.send()\n' - # f'codec: {self._codec}\n\n' - # f'msg: {msg}\n' - # ) - - if type(msg) not in msgtypes.__msg_types__: - if strict_types: - raise _mk_send_mte( - msg, - codec=codec, - ) - else: - log.warning( - 'Sending non-`Msg`-spec msg?\n\n' - f'{msg}\n' - ) - - try: - bytes_data: bytes = codec.encode(msg) - except TypeError as _err: - typerr = _err - msgtyperr: MsgTypeError = _mk_send_mte( - msg, - codec=codec, - message=( - f'IPC-msg-spec violation in\n\n' - f'{pretty_struct.Struct.pformat(msg)}' - ), - src_type_error=typerr, - ) - raise msgtyperr from typerr - - # supposedly the fastest says, - # https://stackoverflow.com/a/54027962 - size: bytes = struct.pack(" - # except BaseException as _err: - # err = _err - # if not isinstance(err, MsgTypeError): - # __tracebackhide__: bool = False - # raise - - @property - def laddr(self) -> tuple[str, int]: - return self._laddr - - @property - def raddr(self) -> tuple[str, int]: - return self._raddr - - async def recv(self) -> Any: - return await self._aiter_pkts.asend(None) - - async def drain(self) -> AsyncIterator[dict]: - ''' - Drain the stream's remaining messages sent from - the far end until the connection is closed by - the peer. - - ''' - try: - async for msg in self._iter_packets(): - self.drained.append(msg) - except TransportClosed: - for msg in self.drained: - yield msg - - def __aiter__(self): - return self._aiter_pkts - - def connected(self) -> bool: - return self.stream.socket.fileno() != -1 - - -def get_msg_transport( - - key: tuple[str, str], - -) -> Type[MsgTransport]: - - return { - ('msgpack', 'tcp'): MsgpackTCPStream, - }[key] - - -class Channel: - ''' - An inter-process channel for communication between (remote) actors. - - Wraps a ``MsgStream``: transport + encoding IPC connection. - - Currently we only support ``trio.SocketStream`` for transport - (aka TCP) and the ``msgpack`` interchange format via the ``msgspec`` - codec libary. - - ''' - def __init__( - - self, - destaddr: tuple[str, int]|None, - - msg_transport_type_key: tuple[str, str] = ('msgpack', 'tcp'), - - # TODO: optional reconnection support? - # auto_reconnect: bool = False, - # on_reconnect: typing.Callable[..., typing.Awaitable] = None, - - ) -> None: - - # self._recon_seq = on_reconnect - # self._autorecon = auto_reconnect - - self._destaddr = destaddr - self._transport_key = msg_transport_type_key - - # Either created in ``.connect()`` or passed in by - # user in ``.from_stream()``. - self._stream: trio.SocketStream|None = None - self._transport: MsgTransport|None = None - - # set after handshake - always uid of far end - self.uid: tuple[str, str]|None = None - - self._aiter_msgs = self._iter_msgs() - self._exc: Exception|None = None # set if far end actor errors - self._closed: bool = False - - # flag set by ``Portal.cancel_actor()`` indicating remote - # (possibly peer) cancellation of the far end actor - # runtime. 
- self._cancel_called: bool = False - - @property - def msgstream(self) -> MsgTransport: - log.info( - '`Channel.msgstream` is an old name, use `._transport`' - ) - return self._transport - - @property - def transport(self) -> MsgTransport: - return self._transport - - @classmethod - def from_stream( - cls, - stream: trio.SocketStream, - **kwargs, - - ) -> Channel: - - src, dst = get_stream_addrs(stream) - chan = Channel( - destaddr=dst, - **kwargs, - ) - - # set immediately here from provided instance - chan._stream: trio.SocketStream = stream - chan.set_msg_transport(stream) - return chan - - def set_msg_transport( - self, - stream: trio.SocketStream, - type_key: tuple[str, str]|None = None, - - # XXX optionally provided codec pair for `msgspec`: - # https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types - codec: MsgCodec|None = None, - - ) -> MsgTransport: - type_key = ( - type_key - or - self._transport_key - ) - # get transport type, then - self._transport = get_msg_transport( - type_key - # instantiate an instance of the msg-transport - )( - stream, - codec=codec, - ) - return self._transport - - @cm - def apply_codec( - self, - codec: MsgCodec, - - ) -> None: - ''' - Temporarily override the underlying IPC msg codec for - dynamic enforcement of messaging schema. - - ''' - orig: MsgCodec = self._transport.codec - try: - self._transport.codec = codec - yield - finally: - self._transport.codec = orig - - # TODO: do a .src/.dst: str for maddrs? - def __repr__(self) -> str: - if not self._transport: - return '' - - return repr( - self._transport.stream.socket._sock - ).replace( # type: ignore - "socket.socket", - "Channel", - ) - - @property - def laddr(self) -> tuple[str, int]|None: - return self._transport.laddr if self._transport else None - - @property - def raddr(self) -> tuple[str, int]|None: - return self._transport.raddr if self._transport else None - - async def connect( - self, - destaddr: tuple[Any, ...] | None = None, - **kwargs - - ) -> MsgTransport: - - if self.connected(): - raise RuntimeError("channel is already connected?") - - destaddr = destaddr or self._destaddr - assert isinstance(destaddr, tuple) - - stream = await trio.open_tcp_stream( - *destaddr, - **kwargs - ) - transport = self.set_msg_transport(stream) - - log.transport( - f'Opened channel[{type(transport)}]: {self.laddr} -> {self.raddr}' - ) - return transport - - # TODO: something like, - # `pdbp.hideframe_on(errors=[MsgTypeError])` - # instead of the `try/except` hack we have rn.. - # seems like a pretty useful thing to have in general - # along with being able to filter certain stack frame(s / sets) - # possibly based on the current log-level? - async def send( - self, - payload: Any, - - hide_tb: bool = False, - - ) -> None: - ''' - Send a coded msg-blob over the transport. - - ''' - __tracebackhide__: bool = hide_tb - try: - log.transport( - '=> send IPC msg:\n\n' - f'{pformat(payload)}\n' - ) - # assert self._transport # but why typing? - await self._transport.send( - payload, - hide_tb=hide_tb, - ) - except BaseException as _err: - err = _err # bind for introspection - if not isinstance(_err, MsgTypeError): - # assert err - __tracebackhide__: bool = False - else: - assert err.cid - - raise - - async def recv(self) -> Any: - assert self._transport - return await self._transport.recv() - - # TODO: auto-reconnect features like 0mq/nanomsg? - # -[ ] implement it manually with nods to SC prot - # possibly on multiple transport backends? 
- # -> seems like that might be re-inventing scalability - # prots tho no? - # try: - # return await self._transport.recv() - # except trio.BrokenResourceError: - # if self._autorecon: - # await self._reconnect() - # return await self.recv() - # raise - - async def aclose(self) -> None: - - log.transport( - f'Closing channel to {self.uid} ' - f'{self.laddr} -> {self.raddr}' - ) - assert self._transport - await self._transport.stream.aclose() - self._closed = True - - async def __aenter__(self): - await self.connect() - return self - - async def __aexit__(self, *args): - await self.aclose(*args) - - def __aiter__(self): - return self._aiter_msgs - - # ?TODO? run any reconnection sequence? - # -[ ] prolly should be impl-ed as deco-API? - # - # async def _reconnect(self) -> None: - # """Handle connection failures by polling until a reconnect can be - # established. - # """ - # down = False - # while True: - # try: - # with trio.move_on_after(3) as cancel_scope: - # await self.connect() - # cancelled = cancel_scope.cancelled_caught - # if cancelled: - # log.transport( - # "Reconnect timed out after 3 seconds, retrying...") - # continue - # else: - # log.transport("Stream connection re-established!") - - # # on_recon = self._recon_seq - # # if on_recon: - # # await on_recon(self) - - # break - # except (OSError, ConnectionRefusedError): - # if not down: - # down = True - # log.transport( - # f"Connection to {self.raddr} went down, waiting" - # " for re-establishment") - # await trio.sleep(1) - - async def _iter_msgs( - self - ) -> AsyncGenerator[Any, None]: - ''' - Yield `MsgType` IPC msgs decoded and deliverd from - an underlying `MsgTransport` protocol. - - This is a streaming routine alo implemented as an async-gen - func (same a `MsgTransport._iter_pkts()`) gets allocated by - a `.__call__()` inside `.__init__()` where it is assigned to - the `._aiter_msgs` attr. - - ''' - assert self._transport - while True: - try: - async for msg in self._transport: - match msg: - # NOTE: if transport/interchange delivers - # a type error, we pack it with the far - # end peer `Actor.uid` and relay the - # `Error`-msg upward to the `._rpc` stack - # for normal RAE handling. - case MsgTypeError(): - yield pack_from_raise( - local_err=msg, - cid=msg.cid, - - # XXX we pack it here bc lower - # layers have no notion of an - # actor-id ;) - src_uid=self.uid, - ) - case _: - yield msg - - except trio.BrokenResourceError: - - # if not self._autorecon: - raise - - await self.aclose() - - # if self._autorecon: # attempt reconnect - # await self._reconnect() - # continue - - def connected(self) -> bool: - return self._transport.connected() if self._transport else False - - -@acm -async def _connect_chan( - host: str, - port: int - -) -> typing.AsyncGenerator[Channel, None]: - ''' - Create and connect a channel with disconnect on context manager - teardown. 
- - ''' - chan = Channel((host, port)) - await chan.connect() - yield chan - with trio.CancelScope(shield=True): - await chan.aclose() diff --git a/tractor/_portal.py b/tractor/_portal.py index cee10c47..c741df7d 100644 --- a/tractor/_portal.py +++ b/tractor/_portal.py @@ -43,7 +43,7 @@ from .trionics import maybe_open_nursery from ._state import ( current_actor, ) -from ._ipc import Channel +from .ipc import Channel from .log import get_logger from .msg import ( # Error, @@ -52,8 +52,8 @@ from .msg import ( Return, ) from ._exceptions import ( - # unpack_error, NoResult, + TransportClosed, ) from ._context import ( Context, @@ -107,6 +107,10 @@ class Portal: # point. self._expect_result_ctx: Context|None = None self._streams: set[MsgStream] = set() + + # TODO, this should be PRIVATE (and never used publicly)! since it's just + # a cached ref to the local runtime instead of calling + # `current_actor()` everywhere.. XD self.actor: Actor = current_actor() @property @@ -171,7 +175,7 @@ class Portal: # not expecting a "main" result if self._expect_result_ctx is None: log.warning( - f"Portal for {self.channel.uid} not expecting a final" + f"Portal for {self.channel.aid} not expecting a final" " result?\nresult() should only be called if subactor" " was spawned with `ActorNursery.run_in_actor()`") return NoResult @@ -218,7 +222,7 @@ class Portal: # IPC calls if self._streams: log.cancel( - f"Cancelling all streams with {self.channel.uid}") + f"Cancelling all streams with {self.channel.aid}") for stream in self._streams.copy(): try: await stream.aclose() @@ -263,7 +267,7 @@ class Portal: return False reminfo: str = ( - f'c)=> {self.channel.uid}\n' + f'c)=> {self.channel.aid}\n' f' |_{chan}\n' ) log.cancel( @@ -301,14 +305,34 @@ class Portal: return False except ( + # XXX, should never really get raised unless we aren't + # wrapping them in the below type by mistake? + # + # Leaving the catch here for now until we're very sure + # all the cases (for various tpt protos) have indeed been + # re-wrapped ;p trio.ClosedResourceError, trio.BrokenResourceError, - ): - log.debug( - 'IPC chan for actor already closed or broken?\n\n' - f'{self.channel.uid}\n' + + TransportClosed, + ) as tpt_err: + report: str = ( + f'IPC chan for actor already closed or broken?\n\n' + f'{self.channel.aid}\n' f' |_{self.channel}\n' ) + match tpt_err: + case TransportClosed(): + log.debug(report) + case _: + report += ( + f'\n' + f'Unhandled low-level transport-closed/error during\n' + f'Portal.cancel_actor()` request?\n' + f'<{type(tpt_err).__name__}( {tpt_err} )>\n' + ) + log.warning(report) + return False # TODO: do we still need this for low level `Actor`-runtime @@ -504,8 +528,12 @@ class LocalPortal: return it's result. ''' - obj = self.actor if ns == 'self' else importlib.import_module(ns) - func = getattr(obj, func_name) + obj = ( + self.actor + if ns == 'self' + else importlib.import_module(ns) + ) + func: Callable = getattr(obj, func_name) return await func(**kwargs) @@ -543,17 +571,18 @@ async def open_portal( await channel.connect() was_connected = True - if channel.uid is None: - await actor._do_handshake(channel) + if channel.aid is None: + await channel._do_handshake( + aid=actor.aid, + ) msg_loop_cs: trio.CancelScope|None = None if start_msg_loop: - from ._runtime import process_messages + from . 
import _rpc msg_loop_cs = await tn.start( partial( - process_messages, - actor, - channel, + _rpc.process_messages, + chan=channel, # if the local task is cancelled we want to keep # the msg loop running until our block ends shield=True, diff --git a/tractor/_root.py b/tractor/_root.py index 2a9beaa3..048e065c 100644 --- a/tractor/_root.py +++ b/tractor/_root.py @@ -18,7 +18,9 @@ Root actor runtime ignition(s). ''' -from contextlib import asynccontextmanager as acm +from contextlib import ( + asynccontextmanager as acm, +) from functools import partial import importlib import inspect @@ -26,7 +28,10 @@ import logging import os import signal import sys -from typing import Callable +from typing import ( + Any, + Callable, +) import warnings @@ -39,83 +44,41 @@ from ._runtime import ( # Arbiter as Registry, async_main, ) -from .devx import _debug +from .devx import ( + debug, + _frame_stack, +) from . import _spawn from . import _state from . import log -from ._ipc import _connect_chan -from ._exceptions import is_multi_cancelled - - -# set at startup and after forks -_default_host: str = '127.0.0.1' -_default_port: int = 1616 - -# default registry always on localhost -_default_lo_addrs: list[tuple[str, int]] = [( - _default_host, - _default_port, -)] +from .ipc import ( + _connect_chan, +) +from ._addr import ( + Address, + UnwrappedAddress, + default_lo_addrs, + mk_uuid, + wrap_address, +) +from ._exceptions import ( + RuntimeFailure, + is_multi_cancelled, +) logger = log.get_logger('tractor') +# TODO: stick this in a `@acm` defined in `devx.debug`? +# -[ ] also maybe consider making this a `wrapt`-deco to +# save an indent level? +# @acm -async def open_root_actor( - - *, - # defaults are above - registry_addrs: list[tuple[str, int]]|None = None, - - # defaults are above - arbiter_addr: tuple[str, int]|None = None, - - name: str|None = 'root', - - # either the `multiprocessing` start method: - # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods - # OR `trio` (the new default). - start_method: _spawn.SpawnMethodKey|None = None, - - # enables the multi-process debugger support - debug_mode: bool = False, - maybe_enable_greenback: bool = True, # `.pause_from_sync()/breakpoint()` support - enable_stack_on_sig: bool = False, - - # internal logging - loglevel: str|None = None, - - enable_modules: list|None = None, - rpc_module_paths: list|None = None, - - # NOTE: allow caller to ensure that only one registry exists - # and that this call creates it. - ensure_registry: bool = False, - - hide_tb: bool = True, - - # XXX, proxied directly to `.devx._debug._maybe_enter_pm()` - # for REPL-entry logic. - debug_filter: Callable[ - [BaseException|BaseExceptionGroup], - bool, - ] = lambda err: not is_multi_cancelled(err), - - # TODO, a way for actors to augment passing derived - # read-only state to sublayers? - # extra_rt_vars: dict|None = None, - -) -> Actor: - ''' - Runtime init entry point for ``tractor``. - - ''' - _debug.hide_runtime_frames() - __tracebackhide__: bool = hide_tb - - # TODO: stick this in a `@cm` defined in `devx._debug`? 
- # +async def maybe_block_bp( + debug_mode: bool, + maybe_enable_greenback: bool, +) -> bool: # Override the global debugger hook to make it play nice with # ``trio``, see much discussion in: # https://github.com/python-trio/trio/issues/1155#issuecomment-742964018 @@ -124,11 +87,12 @@ async def open_root_actor( 'PYTHONBREAKPOINT', None, ) + bp_blocked: bool if ( debug_mode and maybe_enable_greenback and ( - maybe_mod := await _debug.maybe_init_greenback( + maybe_mod := await debug.maybe_init_greenback( raise_not_found=False, ) ) @@ -138,9 +102,10 @@ async def open_root_actor( 'Enabling `tractor.pause_from_sync()` support!\n' ) os.environ['PYTHONBREAKPOINT'] = ( - 'tractor.devx._debug._sync_pause_from_builtin' + 'tractor.devx.debug._sync_pause_from_builtin' ) _state._runtime_vars['use_greenback'] = True + bp_blocked = False else: # TODO: disable `breakpoint()` by default (without @@ -159,302 +124,421 @@ async def open_root_actor( # lol ok, # https://docs.python.org/3/library/sys.html#sys.breakpointhook os.environ['PYTHONBREAKPOINT'] = "0" + bp_blocked = True - # attempt to retreive ``trio``'s sigint handler and stash it - # on our debugger lock state. - _debug.DebugStatus._trio_handler = signal.getsignal(signal.SIGINT) - - # mark top most level process as root actor - _state._runtime_vars['_is_root'] = True - - # caps based rpc list - enable_modules = ( - enable_modules - or - [] - ) - - if rpc_module_paths: - warnings.warn( - "`rpc_module_paths` is now deprecated, use " - " `enable_modules` instead.", - DeprecationWarning, - stacklevel=2, - ) - enable_modules.extend(rpc_module_paths) - - if start_method is not None: - _spawn.try_set_start_method(start_method) - - if arbiter_addr is not None: - warnings.warn( - '`arbiter_addr` is now deprecated\n' - 'Use `registry_addrs: list[tuple]` instead..', - DeprecationWarning, - stacklevel=2, - ) - registry_addrs = [arbiter_addr] - - registry_addrs: list[tuple[str, int]] = ( - registry_addrs - or - _default_lo_addrs - ) - assert registry_addrs - - loglevel = ( - loglevel - or log._default_loglevel - ).upper() - - if ( - debug_mode - and _spawn._spawn_method == 'trio' - ): - _state._runtime_vars['_debug_mode'] = True - - # expose internal debug module to every actor allowing for - # use of ``await tractor.pause()`` - enable_modules.append('tractor.devx._debug') - - # if debug mode get's enabled *at least* use that level of - # logging for some informative console prompts. - if ( - logging.getLevelName( - # lul, need the upper case for the -> int map? - # sweet "dynamic function behaviour" stdlib... - loglevel, - ) > logging.getLevelName('PDB') - ): - loglevel = 'PDB' - - - elif debug_mode: - raise RuntimeError( - "Debug mode is only supported for the `trio` backend!" - ) - - assert loglevel - _log = log.get_console_log(loglevel) - assert _log - - # TODO: factor this into `.devx._stackscope`!! - if ( - debug_mode - and - enable_stack_on_sig - ): - from .devx._stackscope import enable_stack_on_sig - enable_stack_on_sig() - - # closed into below ping task-func - ponged_addrs: list[tuple[str, int]] = [] - - async def ping_tpt_socket( - addr: tuple[str, int], - timeout: float = 1, - ) -> None: - ''' - Attempt temporary connection to see if a registry is - listening at the requested address by a tranport layer - ping. - - If a connection can't be made quickly we assume none no - server is listening at that addr. 
- - ''' - try: - # TODO: this connect-and-bail forces us to have to - # carefully rewrap TCP 104-connection-reset errors as - # EOF so as to avoid propagating cancel-causing errors - # to the channel-msg loop machinery. Likely it would - # be better to eventually have a "discovery" protocol - # with basic handshake instead? - with trio.move_on_after(timeout): - async with _connect_chan(*addr): - ponged_addrs.append(addr) - - except OSError: - # TODO: make this a "discovery" log level? - logger.info( - f'No actor registry found @ {addr}\n' - ) - - async with trio.open_nursery() as tn: - for addr in registry_addrs: - tn.start_soon( - ping_tpt_socket, - tuple(addr), # TODO: just drop this requirement? - ) - - trans_bind_addrs: list[tuple[str, int]] = [] - - # Create a new local root-actor instance which IS NOT THE - # REGISTRAR - if ponged_addrs: - if ensure_registry: - raise RuntimeError( - f'Failed to open `{name}`@{ponged_addrs}: ' - 'registry socket(s) already bound' - ) - - # we were able to connect to an arbiter - logger.info( - f'Registry(s) seem(s) to exist @ {ponged_addrs}' - ) - - actor = Actor( - name=name or 'anonymous', - registry_addrs=ponged_addrs, - loglevel=loglevel, - enable_modules=enable_modules, - ) - # DO NOT use the registry_addrs as the transport server - # addrs for this new non-registar, root-actor. - for host, port in ponged_addrs: - # NOTE: zero triggers dynamic OS port allocation - trans_bind_addrs.append((host, 0)) - - # Start this local actor as the "registrar", aka a regular - # actor who manages the local registry of "mailboxes" of - # other process-tree-local sub-actors. - else: - - # NOTE that if the current actor IS THE REGISTAR, the - # following init steps are taken: - # - the tranport layer server is bound to each (host, port) - # pair defined in provided registry_addrs, or the default. - trans_bind_addrs = registry_addrs - - # - it is normally desirable for any registrar to stay up - # indefinitely until either all registered (child/sub) - # actors are terminated (via SC supervision) or, - # a re-election process has taken place. - # NOTE: all of ^ which is not implemented yet - see: - # https://github.com/goodboy/tractor/issues/216 - # https://github.com/goodboy/tractor/pull/348 - # https://github.com/goodboy/tractor/issues/296 - - actor = Arbiter( - name or 'registrar', - registry_addrs=registry_addrs, - loglevel=loglevel, - enable_modules=enable_modules, - ) - # XXX, in case the root actor runtime was actually run from - # `tractor.to_asyncio.run_as_asyncio_guest()` and NOt - # `.trio.run()`. - actor._infected_aio = _state._runtime_vars['_is_infected_aio'] - - # Start up main task set via core actor-runtime nurseries. try: - # assign process-local actor - _state._current_actor = actor - - # start local channel-server and fake the portal API - # NOTE: this won't block since we provide the nursery - ml_addrs_str: str = '\n'.join( - f'@{addr}' for addr in trans_bind_addrs - ) - logger.info( - f'Starting local {actor.uid} on the following transport addrs:\n' - f'{ml_addrs_str}' - ) - - # start the actor runtime in a new task - async with trio.open_nursery( - strict_exception_groups=False, - # ^XXX^ TODO? instead unpack any RAE as per "loose" style? 
- ) as nursery: - - # ``_runtime.async_main()`` creates an internal nursery - # and blocks here until any underlying actor(-process) - # tree has terminated thereby conducting so called - # "end-to-end" structured concurrency throughout an - # entire hierarchical python sub-process set; all - # "actor runtime" primitives are SC-compat and thus all - # transitively spawned actors/processes must be as - # well. - await nursery.start( - partial( - async_main, - actor, - accept_addrs=trans_bind_addrs, - parent_addr=None - ) - ) - try: - yield actor - except ( - Exception, - BaseExceptionGroup, - ) as err: - - # TODO, in beginning to handle the subsubactor with - # crashed grandparent cases.. - # - # was_locked: bool = await _debug.maybe_wait_for_debugger( - # child_in_debug=True, - # ) - # XXX NOTE XXX see equiv note inside - # `._runtime.Actor._stream_handler()` where in the - # non-root or root-that-opened-this-mahually case we - # wait for the local actor-nursery to exit before - # exiting the transport channel handler. - entered: bool = await _debug._maybe_enter_pm( - err, - api_frame=inspect.currentframe(), - debug_filter=debug_filter, - ) - - if ( - not entered - and - not is_multi_cancelled( - err, - ) - ): - logger.exception('Root actor crashed\n') - - # ALWAYS re-raise any error bubbled up from the - # runtime! - raise - - finally: - # NOTE: not sure if we'll ever need this but it's - # possibly better for even more determinism? - # logger.cancel( - # f'Waiting on {len(nurseries)} nurseries in root..') - # nurseries = actor._actoruid2nursery.values() - # async with trio.open_nursery() as tempn: - # for an in nurseries: - # tempn.start_soon(an.exited.wait) - - logger.info( - 'Closing down root actor' - ) - await actor.cancel(None) # self cancel + yield bp_blocked finally: - _state._current_actor = None - _state._last_actor_terminated = actor + # restore any prior built-in `breakpoint()` hook state + if builtin_bp_handler is not None: + sys.breakpointhook = builtin_bp_handler + + if orig_bp_path is not None: + os.environ['PYTHONBREAKPOINT'] = orig_bp_path + + else: + # clear env back to having no entry + os.environ.pop('PYTHONBREAKPOINT', None) + + + +@acm +async def open_root_actor( + *, + # defaults are above + registry_addrs: list[UnwrappedAddress]|None = None, + + # defaults are above + arbiter_addr: tuple[UnwrappedAddress]|None = None, + + enable_transports: list[ + # TODO, this should eventually be the pairs as + # defined by (codec, proto) as on `MsgTransport. + _state.TransportProtocolKey, + ]|None = None, + + name: str|None = 'root', + + # either the `multiprocessing` start method: + # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods + # OR `trio` (the new default). + start_method: _spawn.SpawnMethodKey|None = None, + + # enables the multi-process debugger support + debug_mode: bool = False, + maybe_enable_greenback: bool = False, # `.pause_from_sync()/breakpoint()` support + # ^XXX NOTE^ the perf implications of use, + # https://greenback.readthedocs.io/en/latest/principle.html#performance + enable_stack_on_sig: bool = False, + + # internal logging + loglevel: str|None = None, + + enable_modules: list|None = None, + rpc_module_paths: list|None = None, + + # NOTE: allow caller to ensure that only one registry exists + # and that this call creates it. + ensure_registry: bool = False, + + hide_tb: bool = True, + + # XXX, proxied directly to `.devx.debug._maybe_enter_pm()` + # for REPL-entry logic. 
+ debug_filter: Callable[ + [BaseException|BaseExceptionGroup], + bool, + ] = lambda err: not is_multi_cancelled(err), + + # TODO, a way for actors to augment passing derived + # read-only state to sublayers? + # extra_rt_vars: dict|None = None, + +) -> Actor: + ''' + Runtime init entry point for ``tractor``. + + ''' + # XXX NEVER allow nested actor-trees! + if already_actor := _state.current_actor(err_on_no_runtime=False): + rtvs: dict[str, Any] = _state._runtime_vars + root_mailbox: list[str, int] = rtvs['_root_mailbox'] + registry_addrs: list[list[str, int]] = rtvs['_registry_addrs'] + raise RuntimeFailure( + f'A current actor already exists !?\n' + f'({already_actor}\n' + f'\n' + f'You can NOT open a second root actor from within ' + f'an existing tree and the current root of this ' + f'already exists !!\n' + f'\n' + f'_root_mailbox: {root_mailbox!r}\n' + f'_registry_addrs: {registry_addrs!r}\n' + ) + + async with maybe_block_bp( + debug_mode=debug_mode, + maybe_enable_greenback=maybe_enable_greenback, + ): + if enable_transports is None: + enable_transports: list[str] = _state.current_ipc_protos() + else: + _state._runtime_vars['_enable_tpts'] = enable_transports + + # TODO! support multi-tpts per actor! + # Bo + if not len(enable_transports) == 1: + raise RuntimeError( + f'No multi-tpt support yet!\n' + f'enable_transports={enable_transports!r}\n' + ) + + _frame_stack.hide_runtime_frames() + __tracebackhide__: bool = hide_tb + + # attempt to retreive ``trio``'s sigint handler and stash it + # on our debugger lock state. + debug.DebugStatus._trio_handler = signal.getsignal(signal.SIGINT) + + # mark top most level process as root actor + _state._runtime_vars['_is_root'] = True + + # caps based rpc list + enable_modules = ( + enable_modules + or + [] + ) + + if rpc_module_paths: + warnings.warn( + "`rpc_module_paths` is now deprecated, use " + " `enable_modules` instead.", + DeprecationWarning, + stacklevel=2, + ) + enable_modules.extend(rpc_module_paths) + + if start_method is not None: + _spawn.try_set_start_method(start_method) + + # TODO! remove this ASAP! + if arbiter_addr is not None: + warnings.warn( + '`arbiter_addr` is now deprecated\n' + 'Use `registry_addrs: list[tuple]` instead..', + DeprecationWarning, + stacklevel=2, + ) + registry_addrs = [arbiter_addr] + + if not registry_addrs: + registry_addrs: list[UnwrappedAddress] = default_lo_addrs( + enable_transports + ) + + assert registry_addrs + + loglevel = ( + loglevel + or log._default_loglevel + ).upper() - # restore built-in `breakpoint()` hook state if ( debug_mode and - maybe_enable_greenback + _spawn._spawn_method == 'trio' ): - if builtin_bp_handler is not None: - sys.breakpointhook = builtin_bp_handler + _state._runtime_vars['_debug_mode'] = True - if orig_bp_path is not None: - os.environ['PYTHONBREAKPOINT'] = orig_bp_path + # expose internal debug module to every actor allowing for + # use of ``await tractor.pause()`` + enable_modules.append('tractor.devx.debug._tty_lock') - else: - # clear env back to having no entry - os.environ.pop('PYTHONBREAKPOINT', None) + # if debug mode get's enabled *at least* use that level of + # logging for some informative console prompts. + if ( + logging.getLevelName( + # lul, need the upper case for the -> int map? + # sweet "dynamic function behaviour" stdlib... + loglevel, + ) > logging.getLevelName('PDB') + ): + loglevel = 'PDB' - logger.runtime("Root actor terminated") + + elif debug_mode: + raise RuntimeError( + "Debug mode is only supported for the `trio` backend!" 
+ ) + + assert loglevel + _log = log.get_console_log(loglevel) + assert _log + + # TODO: factor this into `.devx._stackscope`!! + if ( + debug_mode + and + enable_stack_on_sig + ): + from .devx._stackscope import enable_stack_on_sig + enable_stack_on_sig() + + # closed into below ping task-func + ponged_addrs: list[UnwrappedAddress] = [] + + async def ping_tpt_socket( + addr: UnwrappedAddress, + timeout: float = 1, + ) -> None: + ''' + Attempt temporary connection to see if a registry is + listening at the requested address by a tranport layer + ping. + + If a connection can't be made quickly we assume none no + server is listening at that addr. + + ''' + try: + # TODO: this connect-and-bail forces us to have to + # carefully rewrap TCP 104-connection-reset errors as + # EOF so as to avoid propagating cancel-causing errors + # to the channel-msg loop machinery. Likely it would + # be better to eventually have a "discovery" protocol + # with basic handshake instead? + with trio.move_on_after(timeout): + async with _connect_chan(addr): + ponged_addrs.append(addr) + + except OSError: + # TODO: make this a "discovery" log level? + logger.info( + f'No actor registry found @ {addr}\n' + ) + + async with trio.open_nursery() as tn: + for addr in registry_addrs: + tn.start_soon( + ping_tpt_socket, + addr, + ) + + trans_bind_addrs: list[UnwrappedAddress] = [] + + # Create a new local root-actor instance which IS NOT THE + # REGISTRAR + if ponged_addrs: + if ensure_registry: + raise RuntimeError( + f'Failed to open `{name}`@{ponged_addrs}: ' + 'registry socket(s) already bound' + ) + + # we were able to connect to an arbiter + logger.info( + f'Registry(s) seem(s) to exist @ {ponged_addrs}' + ) + + actor = Actor( + name=name or 'anonymous', + uuid=mk_uuid(), + registry_addrs=ponged_addrs, + loglevel=loglevel, + enable_modules=enable_modules, + ) + # DO NOT use the registry_addrs as the transport server + # addrs for this new non-registar, root-actor. + for addr in ponged_addrs: + waddr: Address = wrap_address(addr) + trans_bind_addrs.append( + waddr.get_random(bindspace=waddr.bindspace) + ) + + # Start this local actor as the "registrar", aka a regular + # actor who manages the local registry of "mailboxes" of + # other process-tree-local sub-actors. + else: + + # NOTE that if the current actor IS THE REGISTAR, the + # following init steps are taken: + # - the tranport layer server is bound to each addr + # pair defined in provided registry_addrs, or the default. + trans_bind_addrs = registry_addrs + + # - it is normally desirable for any registrar to stay up + # indefinitely until either all registered (child/sub) + # actors are terminated (via SC supervision) or, + # a re-election process has taken place. + # NOTE: all of ^ which is not implemented yet - see: + # https://github.com/goodboy/tractor/issues/216 + # https://github.com/goodboy/tractor/pull/348 + # https://github.com/goodboy/tractor/issues/296 + + actor = Arbiter( + name=name or 'registrar', + uuid=mk_uuid(), + registry_addrs=registry_addrs, + loglevel=loglevel, + enable_modules=enable_modules, + ) + # XXX, in case the root actor runtime was actually run from + # `tractor.to_asyncio.run_as_asyncio_guest()` and NOt + # `.trio.run()`. + actor._infected_aio = _state._runtime_vars['_is_infected_aio'] + + # Start up main task set via core actor-runtime nurseries. 
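# --------------------------------------------------------------------
# (editor's sketch, not part of the patch) The reworked entrypoint in
# use: a single transport proto is selected via the new
# `enable_transports` param, and opening a second root actor inside an
# existing tree now fails loudly with the new `RuntimeFailure` guard
# added near the top of `open_root_actor()`. Names below are
# illustrative only.
import trio
import tractor
from tractor._exceptions import RuntimeFailure

async def main() -> None:
    async with tractor.open_root_actor(
        name='demo-root',
        # only one tpt proto per actor is supported, per this patch
        enable_transports=['tcp'],
        loglevel='warning',
    ):
        # attempting to nest another root actor is now an error
        try:
            async with tractor.open_root_actor(name='nope'):
                pass
        except RuntimeFailure as err:
            print(f'nested root rejected: {err}')

if __name__ == '__main__':
    trio.run(main)
# --------------------------------------------------------------------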
+ try: + # assign process-local actor + _state._current_actor = actor + + # start local channel-server and fake the portal API + # NOTE: this won't block since we provide the nursery + ml_addrs_str: str = '\n'.join( + f'@{addr}' for addr in trans_bind_addrs + ) + logger.info( + f'Starting local {actor.uid} on the following transport addrs:\n' + f'{ml_addrs_str}' + ) + + # start the actor runtime in a new task + async with trio.open_nursery( + strict_exception_groups=False, + # ^XXX^ TODO? instead unpack any RAE as per "loose" style? + ) as nursery: + + # ``_runtime.async_main()`` creates an internal nursery + # and blocks here until any underlying actor(-process) + # tree has terminated thereby conducting so called + # "end-to-end" structured concurrency throughout an + # entire hierarchical python sub-process set; all + # "actor runtime" primitives are SC-compat and thus all + # transitively spawned actors/processes must be as + # well. + await nursery.start( + partial( + async_main, + actor, + accept_addrs=trans_bind_addrs, + parent_addr=None + ) + ) + try: + yield actor + except ( + Exception, + BaseExceptionGroup, + ) as err: + + # TODO, in beginning to handle the subsubactor with + # crashed grandparent cases.. + # + # was_locked: bool = await debug.maybe_wait_for_debugger( + # child_in_debug=True, + # ) + # XXX NOTE XXX see equiv note inside + # `._runtime.Actor._stream_handler()` where in the + # non-root or root-that-opened-this-mahually case we + # wait for the local actor-nursery to exit before + # exiting the transport channel handler. + entered: bool = await debug._maybe_enter_pm( + err, + api_frame=inspect.currentframe(), + debug_filter=debug_filter, + ) + + if ( + not entered + and + not is_multi_cancelled( + err, + ) + ): + logger.exception( + 'Root actor crashed\n' + f'>x)\n' + f' |_{actor}\n' + ) + + # ALWAYS re-raise any error bubbled up from the + # runtime! + raise + + finally: + # NOTE: not sure if we'll ever need this but it's + # possibly better for even more determinism? + # logger.cancel( + # f'Waiting on {len(nurseries)} nurseries in root..') + # nurseries = actor._actoruid2nursery.values() + # async with trio.open_nursery() as tempn: + # for an in nurseries: + # tempn.start_soon(an.exited.wait) + + logger.info( + f'Closing down root actor\n' + f'>)\n' + f'|_{actor}\n' + ) + await actor.cancel(None) # self cancel + finally: + # revert all process-global runtime state + if ( + debug_mode + and + _spawn._spawn_method == 'trio' + ): + _state._runtime_vars['_debug_mode'] = False + + _state._current_actor = None + _state._last_actor_terminated = actor + + logger.runtime( + f'Root actor terminated\n' + f')>\n' + f' |_{actor}\n' + ) def run_daemon( @@ -462,7 +546,7 @@ def run_daemon( # runtime kwargs name: str | None = 'root', - registry_addrs: list[tuple[str, int]] = _default_lo_addrs, + registry_addrs: list[UnwrappedAddress]|None = None, start_method: str | None = None, debug_mode: bool = False, diff --git a/tractor/_rpc.py b/tractor/_rpc.py index c5daed9e..30b72c1d 100644 --- a/tractor/_rpc.py +++ b/tractor/_rpc.py @@ -42,7 +42,7 @@ from trio import ( TaskStatus, ) -from ._ipc import Channel +from .ipc import Channel from ._context import ( Context, ) @@ -57,7 +57,7 @@ from ._exceptions import ( unpack_error, ) from .devx import ( - _debug, + debug, add_div, ) from . import _state @@ -266,7 +266,7 @@ async def _errors_relayed_via_ipc( # TODO: a debug nursery when in debug mode! 
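
# For reference, the application-side shape of the context manager assembled
# above; a hedged usage sketch showing only the commonly used kwargs, with
# everything else left at its default:
import trio
import tractor

async def main() -> None:
    async with tractor.open_root_actor(
        name='my_root',
        loglevel='info',
        # enable_transports=['uds'],  # only a single transport is accepted for now
    ) as actor:
        print(f'root actor up: {actor}')
        # ... spawn sub-actors / run application logic here ...

if __name__ == '__main__':
    trio.run(main)
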
# async with maybe_open_debugger_nursery() as debug_tn: - # => see matching comment in side `._debug._pause()` + # => see matching comment in side `.debug._pause()` rpc_err: BaseException|None = None try: yield # run RPC invoke body @@ -318,7 +318,7 @@ async def _errors_relayed_via_ipc( 'RPC task crashed, attempting to enter debugger\n' f'|_{ctx}' ) - entered_debug = await _debug._maybe_enter_pm( + entered_debug = await debug._maybe_enter_pm( err, api_frame=inspect.currentframe(), ) @@ -462,7 +462,7 @@ async def _invoke( ): # XXX for .pause_from_sync()` usage we need to make sure # `greenback` is boostrapped in the subactor! - await _debug.maybe_init_greenback() + await debug.maybe_init_greenback() # TODO: possibly a specially formatted traceback # (not sure what typing is for this..)? @@ -751,7 +751,7 @@ async def _invoke( and 'Cancel scope stack corrupted' in scope_error.args[0] ): log.exception('Cancel scope stack corrupted!?\n') - # _debug.mk_pdb().set_trace() + # debug.mk_pdb().set_trace() # always set this (child) side's exception as the # local error on the context @@ -779,7 +779,7 @@ async def _invoke( # don't pop the local context until we know the # associated child isn't in debug any more - await _debug.maybe_wait_for_debugger() + await debug.maybe_wait_for_debugger() ctx: Context = actor._contexts.pop(( chan.uid, cid, @@ -869,7 +869,6 @@ async def try_ship_error_to_remote( async def process_messages( - actor: Actor, chan: Channel, shield: bool = False, task_status: TaskStatus[CancelScope] = trio.TASK_STATUS_IGNORED, @@ -907,6 +906,7 @@ async def process_messages( (as utilized inside `Portal.cancel_actor()` ). ''' + actor: Actor = _state.current_actor() assert actor._service_n # runtime state sanity # TODO: once `trio` get's an "obvious way" for req/resp we @@ -983,7 +983,7 @@ async def process_messages( # XXX NOTE XXX don't start entire actor # runtime cancellation if this actor is # currently in debug mode! - pdb_complete: trio.Event|None = _debug.DebugStatus.repl_release + pdb_complete: trio.Event|None = debug.DebugStatus.repl_release if pdb_complete: await pdb_complete.wait() @@ -1156,7 +1156,7 @@ async def process_messages( trio.Event(), ) - # runtime-scoped remote (internal) error + # XXX RUNTIME-SCOPED! remote (likely internal) error # (^- bc no `Error.cid` -^) # # NOTE: this is the non-rpc error case, that @@ -1219,8 +1219,10 @@ async def process_messages( # -[ ] figure out how this will break with other transports? tc.report_n_maybe_raise( message=( - f'peer IPC channel closed abruptly?\n\n' - f'<=x {chan}\n' + f'peer IPC channel closed abruptly?\n' + f'\n' + f'<=x[\n' + f' {chan}\n' f' |_{chan.raddr}\n\n' ) + diff --git a/tractor/_runtime.py b/tractor/_runtime.py index 890a690a..758e5685 100644 --- a/tractor/_runtime.py +++ b/tractor/_runtime.py @@ -40,18 +40,18 @@ from __future__ import annotations from contextlib import ( ExitStack, ) -from collections import defaultdict from functools import partial -from itertools import chain import importlib import importlib.util import os +from pathlib import Path from pprint import pformat import signal import sys from typing import ( Any, Callable, + Type, TYPE_CHECKING, ) import uuid @@ -73,7 +73,18 @@ from tractor.msg import ( pretty_struct, types as msgtypes, ) -from ._ipc import Channel +from .ipc import ( + Channel, + # IPCServer, # causes cycles atm.. 
+ _server, +) +from ._addr import ( + UnwrappedAddress, + Address, + # default_lo_addrs, + get_address_cls, + wrap_address, +) from ._context import ( mk_context, Context, @@ -85,18 +96,13 @@ from ._exceptions import ( ModuleNotExposed, MsgTypeError, unpack_error, - TransportClosed, ) -from .devx import _debug +from .devx import debug from ._discovery import get_registry from ._portal import Portal from . import _state from . import _mp_fixup_main -from ._rpc import ( - process_messages, - try_ship_error_to_remote, -) - +from . import _rpc if TYPE_CHECKING: from ._supervise import ActorNursery @@ -106,8 +112,22 @@ if TYPE_CHECKING: log = get_logger('tractor') -def _get_mod_abspath(module): - return os.path.abspath(module.__file__) +def _get_mod_abspath(module: ModuleType) -> Path: + return Path(module.__file__).absolute() + + +def get_mod_nsps2fps(mod_ns_paths: list[str]) -> dict[str, str]: + ''' + Deliver a table of py module namespace-path-`str`s mapped to + their "physical" `.py` file paths in the file-sys. + + ''' + nsp2fp: dict[str, str] = {} + for nsp in mod_ns_paths: + mod: ModuleType = importlib.import_module(nsp) + nsp2fp[nsp] = str(_get_mod_abspath(mod)) + + return nsp2fp class Actor: @@ -149,16 +169,23 @@ class Actor: # nursery placeholders filled in by `async_main()` after fork _root_n: Nursery|None = None _service_n: Nursery|None = None - _server_n: Nursery|None = None + + _ipc_server: _server.IPCServer|None = None + + @property + def ipc_server(self) -> _server.IPCServer: + ''' + The IPC transport-server for this actor; normally + a process-singleton. + + ''' + return self._ipc_server # Information about `__main__` from parent _parent_main_data: dict[str, str] _parent_chan_cs: CancelScope|None = None _spawn_spec: msgtypes.SpawnSpec|None = None - # syncs for setup/teardown sequences - _server_down: trio.Event|None = None - # if started on ``asycio`` running ``trio`` in guest mode _infected_aio: bool = False @@ -175,15 +202,15 @@ class Actor: def __init__( self, name: str, + uuid: str, *, enable_modules: list[str] = [], - uid: str|None = None, loglevel: str|None = None, - registry_addrs: list[tuple[str, int]]|None = None, + registry_addrs: list[UnwrappedAddress]|None = None, spawn_method: str|None = None, # TODO: remove! - arbiter_addr: tuple[str, int]|None = None, + arbiter_addr: UnwrappedAddress|None = None, ) -> None: ''' @@ -191,12 +218,14 @@ class Actor: phase (aka before a new process is executed). ''' - self.name = name - self.uid = ( - name, - uid or str(uuid.uuid4()) + self._aid = msgtypes.Aid( + name=name, + uuid=uuid, + pid=os.getpid(), ) + self._task: trio.Task|None = None + # state self._cancel_complete = trio.Event() self._cancel_called_by_remote: tuple[str, tuple]|None = None self._cancel_called: bool = False @@ -205,13 +234,14 @@ class Actor: # will be passed to children self._parent_main_data = _mp_fixup_main._mp_figure_out_main() + # TODO? only add this when `is_debug_mode() == True` no? 
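
# What the ns-path -> file-path table built by `get_mod_nsps2fps()` resolves
# to in practice; a tiny standalone illustration (stdlib module names are
# used purely as examples):
import importlib
from pathlib import Path

def mod_abspath(nsp: str) -> Path:
    # same importlib lookup the table-builder does, for a single module
    return Path(importlib.import_module(nsp).__file__).absolute()

print(mod_abspath('json'))           # e.g. /usr/lib/python3.13/json/__init__.py
print(mod_abspath('email.message'))  # a dotted namespace path works the same way
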
# always include debugging tools module - enable_modules.append('tractor.devx._debug') + if _state.is_root_process(): + enable_modules.append('tractor.devx.debug._tty_lock') - self.enable_modules: dict[str, str] = {} - for name in enable_modules: - mod: ModuleType = importlib.import_module(name) - self.enable_modules[name] = _get_mod_abspath(mod) + self.enable_modules: dict[str, str] = get_mod_nsps2fps( + mod_ns_paths=enable_modules, + ) self._mods: dict[str, ModuleType] = {} self.loglevel: str = loglevel @@ -223,21 +253,13 @@ class Actor: DeprecationWarning, stacklevel=2, ) - registry_addrs: list[tuple[str, int]] = [arbiter_addr] + registry_addrs: list[UnwrappedAddress] = [arbiter_addr] # marked by the process spawning backend at startup # will be None for the parent most process started manually # by the user (currently called the "arbiter") self._spawn_method: str = spawn_method - self._peers: defaultdict[ - str, # uaid - list[Channel], # IPC conns from peer - ] = defaultdict(list) - self._peer_connected: dict[tuple[str, str], trio.Event] = {} - self._no_more_peers = trio.Event() - self._no_more_peers.set() - # RPC state self._ongoing_rpc_tasks = trio.Event() self._ongoing_rpc_tasks.set() @@ -256,7 +278,6 @@ class Actor: Context ] = {} - self._listeners: list[trio.abc.Listener] = [] self._parent_chan: Channel|None = None self._forkserver_info: tuple|None = None @@ -269,13 +290,100 @@ class Actor: # when provided, init the registry addresses property from # input via the validator. - self._reg_addrs: list[tuple[str, int]] = [] + self._reg_addrs: list[UnwrappedAddress] = [] if registry_addrs: - self.reg_addrs: list[tuple[str, int]] = registry_addrs + self.reg_addrs: list[UnwrappedAddress] = registry_addrs _state._runtime_vars['_registry_addrs'] = registry_addrs @property - def reg_addrs(self) -> list[tuple[str, int]]: + def aid(self) -> msgtypes.Aid: + ''' + This process-singleton-actor's "unique actor ID" in struct form. + + See the `tractor.msg.Aid` struct for details. + + ''' + return self._aid + + @property + def name(self) -> str: + return self._aid.name + + @property + def uid(self) -> tuple[str, str]: + ''' + This process-singleton's "unique (cross-host) ID". + + Delivered from the `.Aid.name/.uuid` fields as a `tuple` pair + and should be multi-host unique despite a large distributed + process plane. + + ''' + msg: str = ( + f'`{type(self).__name__}.uid` is now deprecated.\n' + 'Use the new `.aid: tractor.msg.Aid` (struct) instead ' + 'which also provides additional named (optional) fields ' + 'beyond just the `.name` and `.uuid`.' 
+ ) + warnings.warn( + msg, + DeprecationWarning, + stacklevel=2, + ) + return ( + self._aid.name, + self._aid.uuid, + ) + + @property + def pid(self) -> int: + return self._aid.pid + + def pformat(self) -> str: + ds: str = '=' + parent_uid: tuple|None = None + if rent_chan := self._parent_chan: + parent_uid = rent_chan.uid + + peers: list = [] + server: _server.IPCServer = self.ipc_server + if server: + peers: list[tuple] = list(server._peer_connected) + + fmtstr: str = ( + f' |_id: {self.aid!r}\n' + # f" aid{ds}{self.aid!r}\n" + f" parent{ds}{parent_uid}\n" + f'\n' + f' |_ipc: {len(peers)!r} connected peers\n' + f" peers{ds}{peers!r}\n" + f" ipc_server{ds}{self._ipc_server}\n" + f'\n' + f' |_rpc: {len(self._rpc_tasks)} tasks\n' + f" ctxs{ds}{len(self._contexts)}\n" + f'\n' + f' |_runtime: ._task{ds}{self._task!r}\n' + f' _spawn_method{ds}{self._spawn_method}\n' + f' _actoruid2nursery{ds}{self._actoruid2nursery}\n' + f' _forkserver_info{ds}{self._forkserver_info}\n' + f'\n' + f' |_state: "TODO: .repr_state()"\n' + f' _cancel_complete{ds}{self._cancel_complete}\n' + f' _cancel_called_by_remote{ds}{self._cancel_called_by_remote}\n' + f' _cancel_called{ds}{self._cancel_called}\n' + ) + return ( + '\n' + ) + + __repr__ = pformat + + @property + def reg_addrs(self) -> list[UnwrappedAddress]: ''' List of (socket) addresses for all known (and contactable) registry actors. @@ -286,7 +394,7 @@ class Actor: @reg_addrs.setter def reg_addrs( self, - addrs: list[tuple[str, int]], + addrs: list[UnwrappedAddress], ) -> None: if not addrs: log.warning( @@ -295,39 +403,10 @@ class Actor: ) return - # always sanity check the input list since it's critical - # that addrs are correct for discovery sys operation. - for addr in addrs: - if not isinstance(addr, tuple): - raise ValueError( - 'Expected `Actor.reg_addrs: list[tuple[str, int]]`\n' - f'Got {addrs}' - ) - - self._reg_addrs = addrs - - async def wait_for_peer( - self, - uid: tuple[str, str], - - ) -> tuple[trio.Event, Channel]: - ''' - Wait for a connection back from a (spawned sub-)actor with - a `uid` using a `trio.Event` for sync. - - ''' - log.debug(f'Waiting for peer {uid!r} to connect') - event = self._peer_connected.setdefault(uid, trio.Event()) - await event.wait() - log.debug(f'{uid!r} successfully connected back to us') - return ( - event, - self._peers[uid][-1], - ) + self._reg_addrs = addrs def load_modules( self, - # debug_mode: bool = False, ) -> None: ''' Load explicitly enabled python modules from local fs after @@ -349,6 +428,9 @@ class Actor: parent_data['init_main_from_path']) status: str = 'Attempting to import enabled modules:\n' + + modpath: str + filepath: str for modpath, filepath in self.enable_modules.items(): # XXX append the allowed module to the python path which # should allow for relative (at least downward) imports. @@ -400,416 +482,6 @@ class Actor: raise mne - # TODO: maybe change to mod-func and rename for implied - # multi-transport semantics? - async def _stream_handler( - self, - stream: trio.SocketStream, - - ) -> None: - ''' - Entry point for new inbound IPC connections on a specific - transport server. - - ''' - self._no_more_peers = trio.Event() # unset by making new - chan = Channel.from_stream(stream) - con_status: str = ( - 'New inbound IPC connection <=\n' - f'|_{chan}\n' - ) - - # send/receive initial handshake response - try: - uid: tuple|None = await self._do_handshake(chan) - except ( - # we need this for ``msgspec`` for some reason? - # for now, it's been put in the stream backend. 
- # trio.BrokenResourceError, - # trio.ClosedResourceError, - - TransportClosed, - ): - # XXX: This may propagate up from `Channel._aiter_recv()` - # and `MsgpackStream._inter_packets()` on a read from the - # stream particularly when the runtime is first starting up - # inside `open_root_actor()` where there is a check for - # a bound listener on the "arbiter" addr. the reset will be - # because the handshake was never meant took place. - log.runtime( - con_status - + - ' -> But failed to handshake? Ignoring..\n' - ) - return - - familiar: str = 'new-peer' - if _pre_chan := self._peers.get(uid): - familiar: str = 'pre-existing-peer' - uid_short: str = f'{uid[0]}[{uid[1][-6:]}]' - con_status += ( - f' -> Handshake with {familiar} `{uid_short}` complete\n' - ) - - if _pre_chan: - # con_status += ( - # ^TODO^ swap once we minimize conn duplication - # -[ ] last thing might be reg/unreg runtime reqs? - # log.warning( - log.debug( - f'?Wait?\n' - f'We already have IPC with peer {uid_short!r}\n' - f'|_{_pre_chan}\n' - ) - - # IPC connection tracking for both peers and new children: - # - if this is a new channel to a locally spawned - # sub-actor there will be a spawn wait even registered - # by a call to `.wait_for_peer()`. - # - if a peer is connecting no such event will exit. - event: trio.Event|None = self._peer_connected.pop( - uid, - None, - ) - if event: - con_status += ( - ' -> Waking subactor spawn waiters: ' - f'{event.statistics().tasks_waiting}\n' - f' -> Registered IPC chan for child actor {uid}@{chan.raddr}\n' - # f' {event}\n' - # f' |{event.statistics()}\n' - ) - # wake tasks waiting on this IPC-transport "connect-back" - event.set() - - else: - con_status += ( - f' -> Registered IPC chan for peer actor {uid}@{chan.raddr}\n' - ) # type: ignore - - chans: list[Channel] = self._peers[uid] - # if chans: - # # TODO: re-use channels for new connections instead - # # of always new ones? - # # => will require changing all the discovery funcs.. - - # append new channel - # TODO: can we just use list-ref directly? - chans.append(chan) - - con_status += ' -> Entering RPC msg loop..\n' - log.runtime(con_status) - - # Begin channel management - respond to remote requests and - # process received reponses. - disconnected: bool = False - last_msg: MsgType - try: - ( - disconnected, - last_msg, - ) = await process_messages( - self, - chan, - ) - except trio.Cancelled: - log.cancel( - 'IPC transport msg loop was cancelled\n' - f'c)>\n' - f' |_{chan}\n' - ) - raise - - finally: - local_nursery: ( - ActorNursery|None - ) = self._actoruid2nursery.get(uid) - - # This is set in ``Portal.cancel_actor()``. So if - # the peer was cancelled we try to wait for them - # to tear down their side of the connection before - # moving on with closing our own side. - if ( - local_nursery - and ( - self._cancel_called - or - chan._cancel_called - ) - # - # ^-TODO-^ along with this is there another condition - # that we should filter with to avoid entering this - # waiting block needlessly? - # -[ ] maybe `and local_nursery.cancelled` and/or - # only if the `._children` table is empty or has - # only `Portal`s with .chan._cancel_called == - # True` as per what we had below; the MAIN DIFF - # BEING that just bc one `Portal.cancel_actor()` - # was called, doesn't mean the whole actor-nurse - # is gonna exit any time soon right!? 
- # - # or - # all(chan._cancel_called for chan in chans) - - ): - log.cancel( - 'Waiting on cancel request to peer..\n' - f'c)=>\n' - f' |_{chan.uid}\n' - ) - - # XXX: this is a soft wait on the channel (and its - # underlying transport protocol) to close from the - # remote peer side since we presume that any channel - # which is mapped to a sub-actor (i.e. it's managed - # by local actor-nursery) has a message that is sent - # to the peer likely by this actor (which may be in - # a shutdown sequence due to cancellation) when the - # local runtime here is now cancelled while - # (presumably) in the middle of msg loop processing. - chan_info: str = ( - f'{chan.uid}\n' - f'|_{chan}\n' - f' |_{chan.transport}\n\n' - ) - with trio.move_on_after(0.5) as drain_cs: - drain_cs.shield = True - - # attempt to wait for the far end to close the - # channel and bail after timeout (a 2-generals - # problem on closure). - assert chan.transport - async for msg in chan.transport.drain(): - - # try to deliver any lingering msgs - # before we destroy the channel. - # This accomplishes deterministic - # ``Portal.cancel_actor()`` cancellation by - # making sure any RPC response to that call is - # delivered the local calling task. - # TODO: factor this into a helper? - log.warning( - 'Draining msg from disconnected peer\n' - f'{chan_info}' - f'{pformat(msg)}\n' - ) - # cid: str|None = msg.get('cid') - cid: str|None = msg.cid - if cid: - # deliver response to local caller/waiter - await self._deliver_ctx_payload( - chan, - cid, - msg, - ) - if drain_cs.cancelled_caught: - log.warning( - 'Timed out waiting on IPC transport channel to drain?\n' - f'{chan_info}' - ) - - # XXX NOTE XXX when no explicit call to - # `open_root_actor()` was made by the application - # (normally we implicitly make that call inside - # the first `.open_nursery()` in root-actor - # user/app code), we can assume that either we - # are NOT the root actor or are root but the - # runtime was started manually. and thus DO have - # to wait for the nursery-enterer to exit before - # shutting down the local runtime to avoid - # clobbering any ongoing subactor - # teardown/debugging/graceful-cancel. - # - # see matching note inside `._supervise.open_nursery()` - # - # TODO: should we have a separate cs + timeout - # block here? - if ( - # XXX SO either, - # - not root OR, - # - is root but `open_root_actor()` was - # entered manually (in which case we do - # the equiv wait there using the - # `devx._debug` sub-sys APIs). - not local_nursery._implicit_runtime_started - ): - log.runtime( - 'Waiting on local actor nursery to exit..\n' - f'|_{local_nursery}\n' - ) - with trio.move_on_after(0.5) as an_exit_cs: - an_exit_cs.shield = True - await local_nursery.exited.wait() - - # TODO: currently this is always triggering for every - # sub-daemon spawned from the `piker.services._mngr`? - # -[ ] how do we ensure that the IPC is supposed to - # be long lived and isn't just a register? - # |_ in the register case how can we signal that the - # ephemeral msg loop was intentional? 
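
# The teardown above leans on a "shielded drain with a deadline" idiom:
# give the remote side a short grace period to flush lingering msgs, but
# never let that wait be cancelled out from under the teardown path. The
# same pattern in isolation, with a plain memory channel standing in for
# the IPC transport (hedged sketch, not the runtime's actual drain API):
import trio

async def drain_with_deadline(
    rx: trio.MemoryReceiveChannel,
    timeout: float = 0.5,
) -> list:
    drained: list = []
    with trio.move_on_after(timeout) as cs:
        cs.shield = True  # keep draining even if the enclosing scope is cancelled
        async for msg in rx:
            drained.append(msg)
    if cs.cancelled_caught:
        print('gave up draining after timeout')
    return drained

async def _demo() -> None:
    tx, rx = trio.open_memory_channel(8)
    await tx.send('lingering-msg')
    await tx.aclose()  # sender done -> the async-for terminates cleanly
    print(await drain_with_deadline(rx))

# trio.run(_demo)
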
- if ( - # not local_nursery._implicit_runtime_started - # and - an_exit_cs.cancelled_caught - ): - report: str = ( - 'Timed out waiting on local actor-nursery to exit?\n' - f'c)>\n' - f' |_{local_nursery}\n' - ) - if children := local_nursery._children: - # indent from above local-nurse repr - report += ( - f' |_{pformat(children)}\n' - ) - - log.warning(report) - - if disconnected: - # if the transport died and this actor is still - # registered within a local nursery, we report - # that the IPC layer may have failed - # unexpectedly since it may be the cause of - # other downstream errors. - entry: tuple|None = local_nursery._children.get(uid) - if entry: - proc: trio.Process - _, proc, _ = entry - - if ( - (poll := getattr(proc, 'poll', None)) - and - poll() is None # proc still alive - ): - # TODO: change log level based on - # detecting whether chan was created for - # ephemeral `.register_actor()` request! - # -[ ] also, that should be avoidable by - # re-using any existing chan from the - # `._discovery.get_registry()` call as - # well.. - log.runtime( - f'Peer IPC broke but subproc is alive?\n\n' - - f'<=x {chan.uid}@{chan.raddr}\n' - f' |_{proc}\n' - ) - - # ``Channel`` teardown and closure sequence - # drop ref to channel so it can be gc-ed and disconnected - con_teardown_status: str = ( - f'IPC channel disconnected:\n' - f'<=x uid: {chan.uid}\n' - f' |_{pformat(chan)}\n\n' - ) - chans.remove(chan) - - # TODO: do we need to be this pedantic? - if not chans: - con_teardown_status += ( - f'-> No more channels with {chan.uid}' - ) - self._peers.pop(uid, None) - - peers_str: str = '' - for uid, chans in self._peers.items(): - peers_str += ( - f'uid: {uid}\n' - ) - for i, chan in enumerate(chans): - peers_str += ( - f' |_[{i}] {pformat(chan)}\n' - ) - - con_teardown_status += ( - f'-> Remaining IPC {len(self._peers)} peers: {peers_str}\n' - ) - - # No more channels to other actors (at all) registered - # as connected. - if not self._peers: - con_teardown_status += ( - 'Signalling no more peer channel connections' - ) - self._no_more_peers.set() - - # NOTE: block this actor from acquiring the - # debugger-TTY-lock since we have no way to know if we - # cancelled it and further there is no way to ensure the - # lock will be released if acquired due to having no - # more active IPC channels. - if _state.is_root_process(): - pdb_lock = _debug.Lock - pdb_lock._blocked.add(uid) - - # TODO: NEEEDS TO BE TESTED! - # actually, no idea if this ever even enters.. XD - # - # XXX => YES IT DOES, when i was testing ctl-c - # from broken debug TTY locking due to - # msg-spec races on application using RunVar... - if ( - (ctx_in_debug := pdb_lock.ctx_in_debug) - and - (pdb_user_uid := ctx_in_debug.chan.uid) - and - local_nursery - ): - entry: tuple|None = local_nursery._children.get( - tuple(pdb_user_uid) - ) - if entry: - proc: trio.Process - _, proc, _ = entry - - if ( - (poll := getattr(proc, 'poll', None)) - and poll() is None - ): - log.cancel( - 'Root actor reports no-more-peers, BUT\n' - 'a DISCONNECTED child still has the debug ' - 'lock!\n\n' - # f'root uid: {self.uid}\n' - f'last disconnected child uid: {uid}\n' - f'locking child uid: {pdb_user_uid}\n' - ) - await _debug.maybe_wait_for_debugger( - child_in_debug=True - ) - - # TODO: just bc a child's transport dropped - # doesn't mean it's not still using the pdb - # REPL! 
so, - # -[ ] ideally we can check out child proc - # tree to ensure that its alive (and - # actually using the REPL) before we cancel - # it's lock acquire by doing the below! - # -[ ] create a way to read the tree of each actor's - # grandchildren such that when an - # intermediary parent is cancelled but their - # child has locked the tty, the grandparent - # will not allow the parent to cancel or - # zombie reap the child! see open issue: - # - https://github.com/goodboy/tractor/issues/320 - # ------ - ------ - # if a now stale local task has the TTY lock still - # we cancel it to allow servicing other requests for - # the lock. - if ( - (db_cs := pdb_lock.get_locking_task_cs()) - and not db_cs.cancel_called - and uid == pdb_user_uid - ): - log.critical( - f'STALE DEBUG LOCK DETECTED FOR {uid}' - ) - # TODO: figure out why this breaks tests.. - db_cs.cancel() - - log.runtime(con_teardown_status) - # finally block closure - # TODO: rename to `._deliver_payload()` since this handles # more then just `result` msgs now obvi XD async def _deliver_ctx_payload( @@ -1024,11 +696,12 @@ class Actor: async def _from_parent( self, - parent_addr: tuple[str, int]|None, + parent_addr: UnwrappedAddress|None, ) -> tuple[ Channel, - list[tuple[str, int]]|None, + list[UnwrappedAddress]|None, + list[str]|None, # preferred tpts ]: ''' Bootstrap this local actor's runtime config from its parent by @@ -1040,35 +713,67 @@ class Actor: # Connect back to the parent actor and conduct initial # handshake. From this point on if we error, we # attempt to ship the exception back to the parent. - chan = Channel( - destaddr=parent_addr, + chan = await Channel.from_addr( + addr=wrap_address(parent_addr) ) - await chan.connect() + assert isinstance(chan, Channel) - # TODO: move this into a `Channel.handshake()`? - # Initial handshake: swap names. - await self._do_handshake(chan) + # init handshake: swap actor-IDs. + await chan._do_handshake(aid=self.aid) - accept_addrs: list[tuple[str, int]]|None = None + accept_addrs: list[UnwrappedAddress]|None = None if self._spawn_method == "trio": # Receive post-spawn runtime state from our parent. spawnspec: msgtypes.SpawnSpec = await chan.recv() - self._spawn_spec = spawnspec + match spawnspec: + case MsgTypeError(): + raise spawnspec + case msgtypes.SpawnSpec(): + self._spawn_spec = spawnspec + log.runtime( + 'Received runtime spec from parent:\n\n' - log.runtime( - 'Received runtime spec from parent:\n\n' + # TODO: eventually all these msgs as + # `msgspec.Struct` with a special mode that + # pformats them in multi-line mode, BUT only + # if "trace"/"util" mode is enabled? + f'{pretty_struct.pformat(spawnspec)}\n' + ) - # TODO: eventually all these msgs as - # `msgspec.Struct` with a special mode that - # pformats them in multi-line mode, BUT only - # if "trace"/"util" mode is enabled? - f'{pretty_struct.pformat(spawnspec)}\n' - ) - accept_addrs: list[tuple[str, int]] = spawnspec.bind_addrs + case _: + raise InternalError( + f'Received invalid non-`SpawnSpec` payload !?\n' + f'{spawnspec}\n' + ) + # ^^XXX TODO XXX^^^ + # when the `SpawnSpec` fails to decode the above will + # raise a `MsgTypeError` which if we do NOT ALSO + # RAISE it will tried to be pprinted in the + # log.runtime() below.. + # + # SO we gotta look at how other `chan.recv()` calls + # are wrapped and do the same for this spec receive! + # -[ ] see `._rpc` likely has the answer? - # TODO: another `Struct` for rtvs.. + # ^^^XXX NOTE XXX^^^, can't be called here! 
+ # + # breakpoint() + # import pdbp; pdbp.set_trace() + # + # => bc we haven't yet received the + # `spawnspec._runtime_vars` which contains + # `debug_mode: bool`.. + + # `SpawnSpec.bind_addrs` + # --------------------- + accept_addrs: list[UnwrappedAddress] = spawnspec.bind_addrs + + # `SpawnSpec._runtime_vars` + # ------------------------- + # => update process-wide globals + # TODO! -[ ] another `Struct` for rtvs.. rvs: dict[str, Any] = spawnspec._runtime_vars if rvs['_debug_mode']: from .devx import ( @@ -1126,18 +831,20 @@ class Actor: f'self._infected_aio = {aio_attr}\n' ) if aio_rtv: - assert trio_runtime.GLOBAL_RUN_CONTEXT.runner.is_guest - # ^TODO^ possibly add a `sniffio` or - # `trio` pub-API for `is_guest_mode()`? + assert ( + trio_runtime.GLOBAL_RUN_CONTEXT.runner.is_guest + # and + # ^TODO^ possibly add a `sniffio` or + # `trio` pub-API for `is_guest_mode()`? + ) rvs['_is_root'] = False # obvi XD - # update process-wide globals _state._runtime_vars.update(rvs) - # XXX: ``msgspec`` doesn't support serializing tuples - # so just cash manually here since it's what our - # internals expect. + # `SpawnSpec.reg_addrs` + # --------------------- + # => update parent provided registrar contact info # self.reg_addrs = [ # TODO: we don't really NEED these as tuples? @@ -1148,82 +855,45 @@ class Actor: for val in spawnspec.reg_addrs ] - # TODO: better then monkey patching.. - # -[ ] maybe read the actual f#$-in `._spawn_spec` XD - for _, attr, value in pretty_struct.iter_fields( - spawnspec, - ): - setattr(self, attr, value) + # `SpawnSpec.enable_modules` + # --------------------- + # => extend RPC-python-module (capabilities) with + # those permitted by parent. + # + # NOTE, only the root actor should have + # a pre-permitted entry for `.devx.debug._tty_lock`. + assert not self.enable_modules + self.enable_modules.update( + spawnspec.enable_modules + ) + + self._parent_main_data = spawnspec._parent_main_data + # XXX QUESTION(s)^^^ + # -[ ] already set in `.__init__()` right, but how is + # it diff from this blatant parent copy? + # -[ ] do we need/want the .__init__() value in + # just the root case orr? return ( chan, accept_addrs, + _state._runtime_vars['_enable_tpts'] ) - except OSError: # failed to connect + # failed to connect back? + except ( + OSError, + ConnectionError, + ): log.warning( f'Failed to connect to spawning parent actor!?\n' + f'\n' f'x=> {parent_addr}\n' - f'|_{self}\n\n' + f' |_{self}\n\n' ) await self.cancel(req_chan=None) # self cancel raise - async def _serve_forever( - self, - handler_nursery: Nursery, - *, - # (host, port) to bind for channel server - listen_sockaddrs: list[tuple[str, int]]|None = None, - - task_status: TaskStatus[Nursery] = trio.TASK_STATUS_IGNORED, - ) -> None: - ''' - Start the IPC transport server, begin listening for new connections. - - This will cause an actor to continue living (and thus - blocking at the process/OS-thread level) until - `.cancel_server()` is called. - - ''' - if listen_sockaddrs is None: - listen_sockaddrs = [(None, 0)] - - self._server_down = trio.Event() - try: - async with trio.open_nursery() as server_n: - - for host, port in listen_sockaddrs: - listeners: list[trio.abc.Listener] = await server_n.start( - partial( - trio.serve_tcp, - - handler=self._stream_handler, - port=port, - host=host, - - # NOTE: configured such that new - # connections will stay alive even if - # this server is cancelled! 
- handler_nursery=handler_nursery, - ) - ) - sockets: list[trio.socket] = [ - getattr(listener, 'socket', 'unknown socket') - for listener in listeners - ] - log.runtime( - 'Started TCP server(s)\n' - f'|_{sockets}\n' - ) - self._listeners.extend(listeners) - - task_status.started(server_n) - - finally: - # signal the server is down since nursery above terminated - self._server_down.set() - def cancel_soon(self) -> None: ''' Cancel this actor asap; can be called from a sync context. @@ -1298,7 +968,7 @@ class Actor: # kill any debugger request task to avoid deadlock # with the root actor in this tree - debug_req = _debug.DebugStatus + debug_req = debug.DebugStatus lock_req_ctx: Context = debug_req.req_ctx if ( lock_req_ctx @@ -1308,7 +978,7 @@ class Actor: msg += ( f'\n' f'-> Cancelling active debugger request..\n' - f'|_{_debug.Lock.repr()}\n\n' + f'|_{debug.Lock.repr()}\n\n' f'|_{lock_req_ctx}\n\n' ) # lock_req_ctx._scope.cancel() @@ -1323,13 +993,9 @@ class Actor: ) # stop channel server - self.cancel_server() - if self._server_down is not None: - await self._server_down.wait() - else: - log.warning( - 'Transport[TCP] server was cancelled start?' - ) + if ipc_server := self.ipc_server: + ipc_server.cancel() + await ipc_server.wait_for_shutdown() # cancel all rpc tasks permanently if self._service_n: @@ -1560,45 +1226,22 @@ class Actor: ) await self._ongoing_rpc_tasks.wait() - def cancel_server(self) -> bool: - ''' - Cancel the internal IPC transport server nursery thereby - preventing any new inbound IPC connections establishing. - - ''' - if self._server_n: - # TODO: obvi a different server type when we eventually - # support some others XD - server_prot: str = 'TCP' - log.runtime( - f'Cancelling {server_prot} server' - ) - self._server_n.cancel_scope.cancel() - return True - - return False - @property - def accept_addrs(self) -> list[tuple[str, int]]: + def accept_addrs(self) -> list[UnwrappedAddress]: ''' All addresses to which the transport-channel server binds and listens for new connections. ''' - # throws OSError on failure - return [ - listener.socket.getsockname() - for listener in self._listeners - ] # type: ignore + return self._ipc_server.accept_addrs @property - def accept_addr(self) -> tuple[str, int]: + def accept_addr(self) -> UnwrappedAddress: ''' Primary address to which the IPC transport server is bound and listening for new connections. ''' - # throws OSError on failure return self.accept_addrs[0] def get_parent(self) -> Portal: @@ -1620,43 +1263,6 @@ class Actor: ''' return self._peers[uid] - # TODO: move to `Channel.handshake(uid)` - async def _do_handshake( - self, - chan: Channel - - ) -> msgtypes.Aid: - ''' - Exchange `(name, UUIDs)` identifiers as the first - communication step with any (peer) remote `Actor`. - - These are essentially the "mailbox addresses" found in - "actor model" parlance. 
- - ''' - name, uuid = self.uid - await chan.send( - msgtypes.Aid( - name=name, - uuid=uuid, - ) - ) - aid: msgtypes.Aid = await chan.recv() - chan.aid = aid - - uid: tuple[str, str] = ( - # str(value[0]), - # str(value[1]) - aid.name, - aid.uuid, - ) - - if not isinstance(uid, tuple): - raise ValueError(f"{uid} is not a valid uid?!") - - chan.uid = uid - return uid - def is_infected_aio(self) -> bool: ''' If `True`, this actor is running `trio` in guest mode on @@ -1670,7 +1276,7 @@ class Actor: async def async_main( actor: Actor, - accept_addrs: tuple[str, int]|None = None, + accept_addrs: UnwrappedAddress|None = None, # XXX: currently ``parent_addr`` is only needed for the # ``multiprocessing`` backend (which pickles state sent to @@ -1679,7 +1285,7 @@ async def async_main( # change this to a simple ``is_subactor: bool`` which will # be False when running as root actor and True when as # a subactor. - parent_addr: tuple[str, int]|None = None, + parent_addr: UnwrappedAddress|None = None, task_status: TaskStatus[None] = trio.TASK_STATUS_IGNORED, ) -> None: @@ -1694,22 +1300,30 @@ async def async_main( the actor's "runtime" and all thus all ongoing RPC tasks. ''' + # XXX NOTE, `_state._current_actor` **must** be set prior to + # calling this core runtime entrypoint! + assert actor is _state.current_actor() + + actor._task: trio.Task = trio.lowlevel.current_task() + # attempt to retreive ``trio``'s sigint handler and stash it # on our debugger state. - _debug.DebugStatus._trio_handler = signal.getsignal(signal.SIGINT) + debug.DebugStatus._trio_handler = signal.getsignal(signal.SIGINT) is_registered: bool = False try: # establish primary connection with immediate parent actor._parent_chan: Channel|None = None - if parent_addr is not None: + if parent_addr is not None: ( actor._parent_chan, set_accept_addr_says_rent, + maybe_preferred_transports_says_rent, ) = await actor._from_parent(parent_addr) + accept_addrs: list[UnwrappedAddress] = [] # either it's passed in because we're not a child or # because we're running in mp mode if ( @@ -1718,7 +1332,20 @@ async def async_main( set_accept_addr_says_rent is not None ): accept_addrs = set_accept_addr_says_rent + else: + enable_transports: list[str] = ( + maybe_preferred_transports_says_rent + or + [_state._def_tpt_proto] + ) + for transport_key in enable_transports: + transport_cls: Type[Address] = get_address_cls( + transport_key + ) + addr: Address = transport_cls.get_random() + accept_addrs.append(addr.unwrap()) + assert accept_addrs # The "root" nursery ensures the channel with the immediate # parent is kept alive as a resilient service until # cancellation steps have (mostly) occurred in @@ -1729,15 +1356,36 @@ async def async_main( actor._root_n = root_nursery assert actor._root_n - async with trio.open_nursery( - strict_exception_groups=False, - ) as service_nursery: + ipc_server: _server.IPCServer + async with ( + trio.open_nursery( + strict_exception_groups=False, + ) as service_nursery, + + _server.open_ipc_server( + parent_tn=service_nursery, + stream_handler_tn=service_nursery, + ) as ipc_server, + # ) as actor._ipc_server, + # ^TODO? prettier? + + ): # This nursery is used to handle all inbound # connections to us such that if the TCP server # is killed, connections can continue to process # in the background until this nursery is cancelled. 
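
# The single parenthesised `async with` used above stacks a nursery and an
# `@asynccontextmanager`-style server so both unwind together; the
# composition idiom in a self-contained form (toy server type and names
# are hypothetical, not the real `_server.open_ipc_server()` API):
from contextlib import asynccontextmanager
import trio

@asynccontextmanager
async def open_toy_server(parent_tn: trio.Nursery):
    server = {'peers': []}               # stand-in for a real server handle
    parent_tn.start_soon(trio.sleep, 0)  # where accept-loops would be started
    try:
        yield server
    finally:
        pass                             # server teardown would run here

async def main() -> None:
    async with (
        trio.open_nursery() as service_nursery,
        open_toy_server(parent_tn=service_nursery) as server,
    ):
        print('server handle:', server)

trio.run(main)
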
actor._service_n = service_nursery - assert actor._service_n + actor._ipc_server = ipc_server + assert ( + actor._service_n + and ( + actor._service_n + is + actor._ipc_server._parent_tn + is + ipc_server._stream_handler_tn + ) + ) # load exposed/allowed RPC modules # XXX: do this **after** establishing a channel to the parent @@ -1753,7 +1401,7 @@ async def async_main( # try: # actor.load_modules() # except ModuleNotFoundError as err: - # _debug.pause_from_sync() + # debug.pause_from_sync() # import pdbp; pdbp.set_trace() # raise @@ -1761,42 +1409,54 @@ async def async_main( # - subactor: the bind address is sent by our parent # over our established channel # - root actor: the ``accept_addr`` passed to this method - assert accept_addrs + # TODO: why is this not with the root nursery? try: - # TODO: why is this not with the root nursery? - actor._server_n = await service_nursery.start( - partial( - actor._serve_forever, - service_nursery, - listen_sockaddrs=accept_addrs, - ) + log.runtime( + 'Booting IPC server' ) + eps: list = await ipc_server.listen_on( + accept_addrs=accept_addrs, + stream_handler_nursery=service_nursery, + ) + log.runtime( + f'Booted IPC server\n' + f'{ipc_server}\n' + ) + assert ( + (eps[0].listen_tn) + is not service_nursery + ) + except OSError as oserr: # NOTE: always allow runtime hackers to debug # tranport address bind errors - normally it's # something silly like the wrong socket-address # passed via a config or CLI Bo - entered_debug: bool = await _debug._maybe_enter_pm(oserr) + entered_debug: bool = await debug._maybe_enter_pm( + oserr, + ) if not entered_debug: - log.exception('Failed to init IPC channel server !?\n') + log.exception('Failed to init IPC server !?\n') else: log.runtime('Exited debug REPL..') raise - accept_addrs: list[tuple[str, int]] = actor.accept_addrs + # TODO, just read direct from ipc_server? + accept_addrs: list[UnwrappedAddress] = actor.accept_addrs # NOTE: only set the loopback addr for the # process-tree-global "root" mailbox since # all sub-actors should be able to speak to # their root actor over that channel. if _state._runtime_vars['_is_root']: + raddrs: list[Address] = _state._runtime_vars['_root_addrs'] for addr in accept_addrs: - host, _ = addr - # TODO: generic 'lo' detector predicate - if '127.0.0.1' in host: - _state._runtime_vars['_root_mailbox'] = addr + waddr: Address = wrap_address(addr) + raddrs.append(addr) + else: + _state._runtime_vars['_root_mailbox'] = raddrs[0] # Register with the arbiter if we're told its addr log.runtime( @@ -1810,24 +1470,23 @@ async def async_main( # only on unique actor uids? 
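
# The `OSError` branch guarded below (a failed transport bind, e.g. a busy
# or privileged port) is easy to reproduce in isolation with plain trio
# listeners; a hedged sketch of the failure mode being handled:
import trio

async def try_bind(port: int) -> None:
    try:
        listeners = await trio.open_tcp_listeners(port, host='127.0.0.1')
    except OSError as oserr:
        # e.g. EADDRINUSE when the port is already bound by another proc
        print(f'failed to bind 127.0.0.1:{port}: {oserr}')
        raise
    print('bound:', [l.socket.getsockname() for l in listeners])
    for l in listeners:
        await l.aclose()

# trio.run(try_bind, 8888)
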
for addr in actor.reg_addrs: try: - assert isinstance(addr, tuple) - assert addr[1] # non-zero after bind + waddr = wrap_address(addr) + assert waddr.is_valid except AssertionError: - await _debug.pause() + await debug.pause() - async with get_registry(*addr) as reg_portal: + async with get_registry(addr) as reg_portal: for accept_addr in accept_addrs: + accept_addr = wrap_address(accept_addr) - if not accept_addr[1]: - await _debug.pause() - - assert accept_addr[1] + if not accept_addr.is_valid: + breakpoint() await reg_portal.run_from_ns( 'self', 'register_actor', uid=actor.uid, - sockaddr=accept_addr, + addr=accept_addr.unwrap(), ) is_registered: bool = True @@ -1842,9 +1501,8 @@ async def async_main( if actor._parent_chan: await root_nursery.start( partial( - process_messages, - actor, - actor._parent_chan, + _rpc.process_messages, + chan=actor._parent_chan, shield=True, ) ) @@ -1885,7 +1543,7 @@ async def async_main( log.exception(err_report) if actor._parent_chan: - await try_ship_error_to_remote( + await _rpc.try_ship_error_to_remote( actor._parent_chan, internal_err, ) @@ -1932,7 +1590,7 @@ async def async_main( # prevents any `infected_aio` actor from continuing # and any callbacks in the `ls` here WILL NOT be # called!! - # await _debug.pause(shield=True) + # await debug.pause(shield=True) ls.close() @@ -1945,7 +1603,7 @@ async def async_main( # # if actor.name == 'brokerd.ib': # with CancelScope(shield=True): - # await _debug.breakpoint() + # await debug.breakpoint() # Unregister actor from the registry-sys / registrar. if ( @@ -1954,12 +1612,13 @@ async def async_main( ): failed: bool = False for addr in actor.reg_addrs: - assert isinstance(addr, tuple) + waddr = wrap_address(addr) + assert waddr.is_valid with trio.move_on_after(0.5) as cs: cs.shield = True try: async with get_registry( - *addr, + addr, ) as reg_portal: await reg_portal.run_from_ns( 'self', @@ -1978,16 +1637,18 @@ async def async_main( ) # Ensure all peers (actors connected to us as clients) are finished - if not actor._no_more_peers.is_set(): - if any( - chan.connected() for chan in chain(*actor._peers.values()) - ): - teardown_report += ( - f'-> Waiting for remaining peers {actor._peers} to clear..\n' - ) - log.runtime(teardown_report) - with CancelScope(shield=True): - await actor._no_more_peers.wait() + if ( + (ipc_server := actor.ipc_server) + and + ipc_server.has_peers(check_chans=True) + ): + teardown_report += ( + f'-> Waiting for remaining peers {ipc_server._peers} to clear..\n' + ) + log.runtime(teardown_report) + await ipc_server.wait_for_no_more_peers( + shield=True, + ) teardown_report += ( '-> All peer channels are complete\n' @@ -2001,15 +1662,15 @@ async def async_main( log.info(teardown_report) -# TODO: rename to `Registry` and move to `._discovery`! +# TODO: rename to `Registry` and move to `.discovery._registry`! class Arbiter(Actor): ''' - A special registrar actor who can contact all other actors - within its immediate process tree and possibly keeps a registry - of others meant to be discoverable in a distributed - application. Normally the registrar is also the "root actor" - and thus always has access to the top-most-level actor - (process) nursery. + A special registrar (and for now..) `Actor` who can contact all + other actors within its immediate process tree and possibly keeps + a registry of others meant to be discoverable in a distributed + application. Normally the registrar is also the "root actor" and + thus always has access to the top-most-level actor (process) + nursery. 
By default, the registrar is always initialized when and if no other registrar socket addrs have been specified to runtime @@ -2029,6 +1690,12 @@ class Arbiter(Actor): ''' is_arbiter = True + # TODO, implement this as a read on there existing a `._state` of + # some sort setup by whenever we impl this all as + # a `.discovery._registry.open_registry()` API + def is_registry(self) -> bool: + return self.is_arbiter + def __init__( self, *args, @@ -2037,7 +1704,7 @@ class Arbiter(Actor): self._registry: dict[ tuple[str, str], - tuple[str, int], + UnwrappedAddress, ] = {} self._waiters: dict[ str, @@ -2053,18 +1720,18 @@ class Arbiter(Actor): self, name: str, - ) -> tuple[str, int]|None: + ) -> UnwrappedAddress|None: - for uid, sockaddr in self._registry.items(): + for uid, addr in self._registry.items(): if name in uid: - return sockaddr + return addr return None async def get_registry( self - ) -> dict[str, tuple[str, int]]: + ) -> dict[str, UnwrappedAddress]: ''' Return current name registry. @@ -2084,7 +1751,7 @@ class Arbiter(Actor): self, name: str, - ) -> list[tuple[str, int]]: + ) -> list[UnwrappedAddress]: ''' Wait for a particular actor to register. @@ -2092,44 +1759,41 @@ class Arbiter(Actor): registered. ''' - sockaddrs: list[tuple[str, int]] = [] - sockaddr: tuple[str, int] + addrs: list[UnwrappedAddress] = [] + addr: UnwrappedAddress mailbox_info: str = 'Actor registry contact infos:\n' - for uid, sockaddr in self._registry.items(): + for uid, addr in self._registry.items(): mailbox_info += ( f'|_uid: {uid}\n' - f'|_sockaddr: {sockaddr}\n\n' + f'|_addr: {addr}\n\n' ) if name == uid[0]: - sockaddrs.append(sockaddr) + addrs.append(addr) - if not sockaddrs: + if not addrs: waiter = trio.Event() self._waiters.setdefault(name, []).append(waiter) await waiter.wait() for uid in self._waiters[name]: if not isinstance(uid, trio.Event): - sockaddrs.append(self._registry[uid]) + addrs.append(self._registry[uid]) log.runtime(mailbox_info) - return sockaddrs + return addrs async def register_actor( self, uid: tuple[str, str], - sockaddr: tuple[str, int] - + addr: UnwrappedAddress ) -> None: uid = name, hash = (str(uid[0]), str(uid[1])) - addr = (host, port) = ( - str(sockaddr[0]), - int(sockaddr[1]), - ) - if port == 0: - await _debug.pause() - assert port # should never be 0-dynamic-os-alloc + waddr: Address = wrap_address(addr) + if not waddr.is_valid: + # should never be 0-dynamic-os-alloc + await debug.pause() + self._registry[uid] = addr # pop and signal all waiter events diff --git a/tractor/_spawn.py b/tractor/_spawn.py index 3159508d..a3e3194e 100644 --- a/tractor/_spawn.py +++ b/tractor/_spawn.py @@ -34,7 +34,7 @@ from typing import ( import trio from trio import TaskStatus -from .devx._debug import ( +from .devx.debug import ( maybe_wait_for_debugger, acquire_debug_lock, ) @@ -46,19 +46,23 @@ from tractor._state import ( _runtime_vars, ) from tractor.log import get_logger +from tractor._addr import UnwrappedAddress from tractor._portal import Portal from tractor._runtime import Actor from tractor._entry import _mp_main from tractor._exceptions import ActorFailure from tractor.msg.types import ( + Aid, SpawnSpec, ) if TYPE_CHECKING: + from ipc import IPCServer from ._supervise import ActorNursery ProcessType = TypeVar('ProcessType', mp.Process, trio.Process) + log = get_logger('tractor') # placeholder for an mp start context if so using that backend @@ -163,7 +167,7 @@ async def exhaust_portal( # TODO: merge with above? 
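
# The registrar's register/wait handshake above reduces to a
# name -> waiter-events table: readers park on a `trio.Event` until some
# writer registers that name. Minimal sketch with plain dicts (not the
# real `Arbiter` type or its uid/addr wrapping):
import trio

registry: dict[str, tuple[str, int]] = {}
waiters: dict[str, list[trio.Event]] = {}

async def wait_for(name: str) -> tuple[str, int]:
    if name in registry:
        return registry[name]
    ev = trio.Event()
    waiters.setdefault(name, []).append(ev)
    await ev.wait()
    return registry[name]

def register(name: str, addr: tuple[str, int]) -> None:
    registry[name] = addr
    for ev in waiters.pop(name, []):
        ev.set()

async def _demo() -> None:
    async with trio.open_nursery() as tn:
        async def waiter() -> None:
            print('found:', await wait_for('worker'))
        tn.start_soon(waiter)
        await trio.sleep(0.1)
        register('worker', ('127.0.0.1', 1616))

# trio.run(_demo)
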
log.warning( 'Cancelled portal result waiter task:\n' - f'uid: {portal.channel.uid}\n' + f'uid: {portal.channel.aid}\n' f'error: {err}\n' ) return err @@ -171,7 +175,7 @@ async def exhaust_portal( else: log.debug( f'Returning final result from portal:\n' - f'uid: {portal.channel.uid}\n' + f'uid: {portal.channel.aid}\n' f'result: {final}\n' ) return final @@ -324,12 +328,12 @@ async def soft_kill( see `.hard_kill()`). ''' - uid: tuple[str, str] = portal.channel.uid + peer_aid: Aid = portal.channel.aid try: log.cancel( f'Soft killing sub-actor via portal request\n' f'\n' - f'(c=> {portal.chan.uid}\n' + f'(c=> {peer_aid}\n' f' |_{proc}\n' ) # wait on sub-proc to signal termination @@ -378,7 +382,7 @@ async def soft_kill( if proc.poll() is None: # type: ignore log.warning( 'Subactor still alive after cancel request?\n\n' - f'uid: {uid}\n' + f'uid: {peer_aid}\n' f'|_{proc}\n' ) n.cancel_scope.cancel() @@ -392,14 +396,15 @@ async def new_proc( errors: dict[tuple[str, str], Exception], # passed through to actor main - bind_addrs: list[tuple[str, int]], - parent_addr: tuple[str, int], + bind_addrs: list[UnwrappedAddress], + parent_addr: UnwrappedAddress, _runtime_vars: dict[str, Any], # serialized and sent to _child *, infect_asyncio: bool = False, - task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED + task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED, + proc_kwargs: dict[str, any] = {} ) -> None: @@ -419,6 +424,7 @@ async def new_proc( _runtime_vars, # run time vars infect_asyncio=infect_asyncio, task_status=task_status, + proc_kwargs=proc_kwargs ) @@ -429,12 +435,13 @@ async def trio_proc( errors: dict[tuple[str, str], Exception], # passed through to actor main - bind_addrs: list[tuple[str, int]], - parent_addr: tuple[str, int], + bind_addrs: list[UnwrappedAddress], + parent_addr: UnwrappedAddress, _runtime_vars: dict[str, Any], # serialized and sent to _child *, infect_asyncio: bool = False, - task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED + task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED, + proc_kwargs: dict[str, any] = {} ) -> None: ''' @@ -456,6 +463,9 @@ async def trio_proc( # the OS; it otherwise can be passed via the parent channel if # we prefer in the future (for privacy). "--uid", + # TODO, how to pass this over "wire" encodings like + # cmdline args? + # -[ ] maybe we can add an `Aid.min_tuple()` ? str(subactor.uid), # Address the child must connect to on startup "--parent_addr", @@ -473,9 +483,10 @@ async def trio_proc( cancelled_during_spawn: bool = False proc: trio.Process|None = None + ipc_server: IPCServer = actor_nursery._actor.ipc_server try: try: - proc: trio.Process = await trio.lowlevel.open_process(spawn_cmd) + proc: trio.Process = await trio.lowlevel.open_process(spawn_cmd, **proc_kwargs) log.runtime( 'Started new child\n' f'|_{proc}\n' @@ -484,7 +495,7 @@ async def trio_proc( # wait for actor to spawn and connect back to us # channel should have handshake completed by the # local actor by the time we get a ref to it - event, chan = await actor_nursery._actor.wait_for_peer( + event, chan = await ipc_server.wait_for_peer( subactor.uid ) @@ -517,15 +528,15 @@ async def trio_proc( # send a "spawning specification" which configures the # initial runtime state of the child. 
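
# The new `proc_kwargs` mapping above is forwarded verbatim to
# `trio.lowlevel.open_process()`; e.g. (a hedged sketch, hypothetical
# helper name) a caller could silence a child's stderr or pass any other
# `subprocess`-style option through it:
import subprocess
import trio

async def spawn_quiet(cmd: list[str]) -> int:
    proc = await trio.lowlevel.open_process(
        cmd,
        stderr=subprocess.DEVNULL,  # an example option forwarded to the OS spawn
    )
    await proc.wait()
    return proc.returncode

# trio.run(spawn_quiet, ['python', '-c', 'print("hi")'])
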
- await chan.send( - SpawnSpec( - _parent_main_data=subactor._parent_main_data, - enable_modules=subactor.enable_modules, - reg_addrs=subactor.reg_addrs, - bind_addrs=bind_addrs, - _runtime_vars=_runtime_vars, - ) + sspec = SpawnSpec( + _parent_main_data=subactor._parent_main_data, + enable_modules=subactor.enable_modules, + reg_addrs=subactor.reg_addrs, + bind_addrs=bind_addrs, + _runtime_vars=_runtime_vars, ) + log.runtime(f'Sending spawn spec: {str(sspec)}') + await chan.send(sspec) # track subactor in current nursery curr_actor: Actor = current_actor() @@ -635,12 +646,13 @@ async def mp_proc( subactor: Actor, errors: dict[tuple[str, str], Exception], # passed through to actor main - bind_addrs: list[tuple[str, int]], - parent_addr: tuple[str, int], + bind_addrs: list[UnwrappedAddress], + parent_addr: UnwrappedAddress, _runtime_vars: dict[str, Any], # serialized and sent to _child *, infect_asyncio: bool = False, - task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED + task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED, + proc_kwargs: dict[str, any] = {} ) -> None: @@ -715,12 +727,14 @@ async def mp_proc( log.runtime(f"Started {proc}") + ipc_server: IPCServer = actor_nursery._actor.ipc_server try: # wait for actor to spawn and connect back to us # channel should have handshake completed by the # local actor by the time we get a ref to it - event, chan = await actor_nursery._actor.wait_for_peer( - subactor.uid) + event, chan = await ipc_server.wait_for_peer( + subactor.uid, + ) # XXX: monkey patch poll API to match the ``subprocess`` API.. # not sure why they don't expose this but kk. diff --git a/tractor/_state.py b/tractor/_state.py index 79c8bdea..2a47e548 100644 --- a/tractor/_state.py +++ b/tractor/_state.py @@ -14,16 +14,19 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -""" -Per process state +''' +Per actor-process runtime state mgmt APIs. -""" +''' from __future__ import annotations from contextvars import ( ContextVar, ) +import os +from pathlib import Path from typing import ( Any, + Literal, TYPE_CHECKING, ) @@ -34,20 +37,39 @@ if TYPE_CHECKING: from ._context import Context +# default IPC transport protocol settings +TransportProtocolKey = Literal[ + 'tcp', + 'uds', +] +_def_tpt_proto: TransportProtocolKey = 'tcp' + _current_actor: Actor|None = None # type: ignore # noqa _last_actor_terminated: Actor|None = None # TODO: mk this a `msgspec.Struct`! +# -[ ] type out all fields obvi! +# -[ ] (eventually) mk wire-ready for monitoring? _runtime_vars: dict[str, Any] = { - '_debug_mode': False, - '_is_root': False, - '_root_mailbox': (None, None), + # root of actor-process tree info + '_is_root': False, # bool + '_root_mailbox': (None, None), # tuple[str|None, str|None] + '_root_addrs': [], # tuple[str|None, str|None] + + # parent->chld ipc protocol caps + '_enable_tpts': [_def_tpt_proto], + + # registrar info '_registry_addrs': [], - '_is_infected_aio': False, - + # `debug_mode: bool` settings + '_debug_mode': False, # bool + 'repl_fixture': False, # |AbstractContextManager[bool] # for `tractor.pause_from_sync()` & `breakpoint()` support 'use_greenback': False, + + # infected-`asyncio`-mode: `trio` running as guest. + '_is_infected_aio': False, } @@ -99,7 +121,7 @@ def current_actor( return _current_actor -def is_main_process() -> bool: +def is_root_process() -> bool: ''' Bool determining if this actor is running in the top-most process. 
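
# The transport keys introduced above are plain `Literal` strings; a hedged
# sketch of the sort of runtime check such a type alias enables (the
# `validate_tpt()` helper is hypothetical and not part of this change):
from typing import Literal, get_args

TransportProtocolKey = Literal['tcp', 'uds']

def validate_tpt(key: str) -> TransportProtocolKey:
    if key not in get_args(TransportProtocolKey):
        raise ValueError(f'unknown transport key: {key!r}')
    return key  # type: ignore[return-value]

print(validate_tpt('uds'))
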
@@ -108,8 +130,10 @@ def is_main_process() -> bool: return mp.current_process().name == 'MainProcess' -# TODO, more verby name? -def debug_mode() -> bool: +is_main_process = is_root_process + + +def is_debug_mode() -> bool: ''' Bool determining if "debug mode" is on which enables remote subactor pdb entry on crashes. @@ -118,6 +142,9 @@ def debug_mode() -> bool: return bool(_runtime_vars['_debug_mode']) +debug_mode = is_debug_mode + + def is_root_process() -> bool: return _runtime_vars['_is_root'] @@ -143,3 +170,34 @@ def current_ipc_ctx( f'|_{current_task()}\n' ) return ctx + + +# std ODE (mutable) app state location +_rtdir: Path = Path(os.environ['XDG_RUNTIME_DIR']) + + +def get_rt_dir( + subdir: str = 'tractor' +) -> Path: + ''' + Return the user "runtime dir" where most userspace apps stick + their IPC and cache related system util-files; we take hold + of a `'XDG_RUNTIME_DIR'/tractor/` subdir by default. + + ''' + rtdir: Path = _rtdir / subdir + if not rtdir.is_dir(): + rtdir.mkdir() + return rtdir + + +def current_ipc_protos() -> list[str]: + ''' + Return the list of IPC transport protocol keys currently + in use by this actor. + + The keys are as declared by `MsgTransport` and `Address` + concrete-backend sub-types defined throughout `tractor.ipc`. + + ''' + return _runtime_vars['_enable_tpts'] diff --git a/tractor/_streaming.py b/tractor/_streaming.py index 2ff2d41c..4683f35d 100644 --- a/tractor/_streaming.py +++ b/tractor/_streaming.py @@ -56,7 +56,7 @@ from tractor.msg import ( if TYPE_CHECKING: from ._runtime import Actor from ._context import Context - from ._ipc import Channel + from .ipc import Channel log = get_logger(__name__) @@ -426,8 +426,8 @@ class MsgStream(trio.abc.Channel): self._closed = re # if caught_eoc: - # # from .devx import _debug - # # await _debug.pause() + # # from .devx import debug + # # await debug.pause() # with trio.CancelScope(shield=True): # await rx_chan.aclose() @@ -437,22 +437,23 @@ class MsgStream(trio.abc.Channel): message: str = ( f'Stream self-closed by {this_side!r}-side before EoC from {peer_side!r}\n' # } bc a stream is a "scope"/msging-phase inside an IPC - f'x}}>\n' + f'c}}>\n' f' |_{self}\n' ) - log.cancel(message) - self._eoc = trio.EndOfChannel(message) - if ( (rx_chan := self._rx_chan) and (stats := rx_chan.statistics()).tasks_waiting_receive ): - log.cancel( - f'Msg-stream is closing but there is still reader tasks,\n' + message += ( + f'AND there is still reader tasks,\n' + f'\n' f'{stats}\n' ) + log.cancel(message) + self._eoc = trio.EndOfChannel(message) + # ?XXX WAIT, why do we not close the local mem chan `._rx_chan` XXX? # => NO, DEFINITELY NOT! <= # if we're a bi-dir `MsgStream` BECAUSE this same @@ -595,8 +596,17 @@ class MsgStream(trio.abc.Channel): trio.ClosedResourceError, trio.BrokenResourceError, BrokenPipeError, - ) as trans_err: - if hide_tb: + ) as _trans_err: + trans_err = _trans_err + if ( + hide_tb + and + self._ctx.chan._exc is trans_err + # ^XXX, IOW, only if the channel is marked errored + # for the same reason as whatever its underlying + # transport raised, do we keep the full low-level tb + # suppressed from the user. + ): raise type(trans_err)( *trans_err.args ) from trans_err @@ -802,13 +812,12 @@ async def open_stream_from_ctx( # sanity, can remove? assert eoc is stream._eoc - log.warning( + log.runtime( 'Stream was terminated by EoC\n\n' # NOTE: won't show the error but # does show txt followed by IPC msg. 
f'{str(eoc)}\n' ) - finally: if ctx._portal: try: diff --git a/tractor/_supervise.py b/tractor/_supervise.py index bc6bc983..0a0463dc 100644 --- a/tractor/_supervise.py +++ b/tractor/_supervise.py @@ -22,13 +22,20 @@ from contextlib import asynccontextmanager as acm from functools import partial import inspect from pprint import pformat -from typing import TYPE_CHECKING +from typing import ( + TYPE_CHECKING, +) import typing import warnings import trio -from .devx._debug import maybe_wait_for_debugger + +from .devx.debug import maybe_wait_for_debugger +from ._addr import ( + UnwrappedAddress, + mk_uuid, +) from ._state import current_actor, is_main_process from .log import get_logger, get_loglevel from ._runtime import Actor @@ -37,18 +44,21 @@ from ._exceptions import ( is_multi_cancelled, ContextCancelled, ) -from ._root import open_root_actor +from ._root import ( + open_root_actor, +) from . import _state from . import _spawn if TYPE_CHECKING: import multiprocessing as mp + # from .ipc._server import IPCServer + from .ipc import IPCServer + log = get_logger(__name__) -_default_bind_addr: tuple[str, int] = ('127.0.0.1', 0) - class ActorNursery: ''' @@ -130,8 +140,9 @@ class ActorNursery: *, - bind_addrs: list[tuple[str, int]] = [_default_bind_addr], + bind_addrs: list[UnwrappedAddress]|None = None, rpc_module_paths: list[str]|None = None, + enable_transports: list[str] = [_state._def_tpt_proto], enable_modules: list[str]|None = None, loglevel: str|None = None, # set log level per subactor debug_mode: bool|None = None, @@ -141,6 +152,7 @@ class ActorNursery: # a `._ria_nursery` since the dependent APIs have been # removed! nursery: trio.Nursery|None = None, + proc_kwargs: dict[str, any] = {} ) -> Portal: ''' @@ -177,7 +189,9 @@ class ActorNursery: enable_modules.extend(rpc_module_paths) subactor = Actor( - name, + name=name, + uuid=mk_uuid(), + # modules allowed to invoked funcs from enable_modules=enable_modules, loglevel=loglevel, @@ -185,7 +199,7 @@ class ActorNursery: # verbatim relay this actor's registrar addresses registry_addrs=current_actor().reg_addrs, ) - parent_addr = self._actor.accept_addr + parent_addr: UnwrappedAddress = self._actor.accept_addr assert parent_addr # start a task to spawn a process @@ -204,6 +218,7 @@ class ActorNursery: parent_addr, _rtv, # run time vars infect_asyncio=infect_asyncio, + proc_kwargs=proc_kwargs ) ) @@ -222,11 +237,12 @@ class ActorNursery: *, name: str | None = None, - bind_addrs: tuple[str, int] = [_default_bind_addr], + bind_addrs: UnwrappedAddress|None = None, rpc_module_paths: list[str] | None = None, enable_modules: list[str] | None = None, loglevel: str | None = None, # set log level per subactor infect_asyncio: bool = False, + proc_kwargs: dict[str, any] = {}, **kwargs, # explicit args to ``fn`` @@ -257,6 +273,7 @@ class ActorNursery: # use the run_in_actor nursery nursery=self._ria_nursery, infect_asyncio=infect_asyncio, + proc_kwargs=proc_kwargs ) # XXX: don't allow stream funcs @@ -301,8 +318,13 @@ class ActorNursery: children: dict = self._children child_count: int = len(children) msg: str = f'Cancelling actor nursery with {child_count} children\n' + + server: IPCServer = self._actor.ipc_server + with trio.move_on_after(3) as cs: - async with trio.open_nursery() as tn: + async with trio.open_nursery( + strict_exception_groups=False, + ) as tn: subactor: Actor proc: trio.Process @@ -321,7 +343,7 @@ class ActorNursery: else: if portal is None: # actor hasn't fully spawned yet - event = self._actor._peer_connected[subactor.uid] + 
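# ---- hedged usage sketch (editor's illustration, not from the patch) ----
# Example of driving the new `ActorNursery.start_actor()` parameters added
# above (`enable_transports`, `proc_kwargs`); the actor name and the
# 'myproj.workers' module path are hypothetical.
import trio
import tractor

async def main() -> None:
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'uds_worker',
            enable_modules=['myproj.workers'],  # hypothetical RPC module
            enable_transports=['uds'],          # new param from this patch
            proc_kwargs={},                     # forwarded to the spawn backend
        )
        # ... interact with the subactor via `portal` ...
        await portal.cancel_actor()

if __name__ == '__main__':
    trio.run(main)
# --------------------------------------------------------------------------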
event: trio.Event = server._peer_connected[subactor.uid] log.warning( f"{subactor.uid} never 't finished spawning?" ) @@ -337,7 +359,7 @@ class ActorNursery: if portal is None: # cancelled while waiting on the event # to arrive - chan = self._actor._peers[subactor.uid][-1] + chan = server._peers[subactor.uid][-1] if chan: portal = Portal(chan) else: # there's no other choice left diff --git a/tractor/_testing/__init__.py b/tractor/_testing/__init__.py index 88860d13..8b906d11 100644 --- a/tractor/_testing/__init__.py +++ b/tractor/_testing/__init__.py @@ -26,7 +26,7 @@ import os import pathlib import tractor -from tractor.devx._debug import ( +from tractor.devx.debug import ( BoxedMaybeException, ) from .pytest import ( @@ -37,6 +37,9 @@ from .fault_simulation import ( ) +# TODO, use dulwhich for this instead? +# -> we're going to likely need it (or something similar) +# for supporting hot-coad reload feats eventually anyway! def repodir() -> pathlib.Path: ''' Return the abspath to the repo directory. diff --git a/tractor/_testing/addr.py b/tractor/_testing/addr.py new file mode 100644 index 00000000..1b066336 --- /dev/null +++ b/tractor/_testing/addr.py @@ -0,0 +1,70 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +''' +Random IPC addr generation for isolating +the discovery space between test sessions. + +Might be eventually useful to expose as a util set from +our `tractor.discovery` subsys? + +''' +import random +from typing import ( + Type, +) +from tractor import ( + _addr, +) + + +def get_rando_addr( + tpt_proto: str, + *, + + # choose random port at import time + _rando_port: str = random.randint(1000, 9999) + +) -> tuple[str, str|int]: + ''' + Used to globally override the runtime to the + per-test-session-dynamic addr so that all tests never conflict + with any other actor tree using the default. + + ''' + addr_type: Type[_addr.Addres] = _addr._address_types[tpt_proto] + def_reg_addr: tuple[str, int] = _addr._default_lo_addrs[tpt_proto] + + # this is the "unwrapped" form expected to be passed to + # `.open_root_actor()` by test body. + testrun_reg_addr: tuple[str, int|str] + match tpt_proto: + case 'tcp': + testrun_reg_addr = ( + addr_type.def_bindspace, + _rando_port, + ) + + # NOTE, file-name uniqueness (no-collisions) will be based on + # the runtime-directory and root (pytest-proc's) pid. + case 'uds': + testrun_reg_addr = addr_type.get_random().unwrap() + + # XXX, as sanity it should never the same as the default for the + # host-singleton registry actor. 
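# ---- hedged usage sketch (editor's illustration, not from the patch) ----
# (the `get_rando_addr()` body continues just below with its final sanity
# `assert` and `return`.) A self-contained mirror of its 'tcp' branch:
# pair a bind host with a session-random port so concurrent test runs
# never collide on the default registry address. The loopback default is
# an assumption here; the real code reads `addr_type.def_bindspace`.
import random

def rando_tcp_reg_addr(host: str = '127.0.0.1') -> tuple[str, int]:
    # same idea as the module-level `_rando_port` default above: pick a
    # port once and reuse it for the whole test session
    return (host, random.randint(1000, 9999))

# >>> rando_tcp_reg_addr()   # e.g. ('127.0.0.1', 4821)
# --------------------------------------------------------------------------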
+ assert def_reg_addr != testrun_reg_addr + + return testrun_reg_addr diff --git a/tractor/_testing/pytest.py b/tractor/_testing/pytest.py index 93eeaf72..1a2f63ab 100644 --- a/tractor/_testing/pytest.py +++ b/tractor/_testing/pytest.py @@ -26,29 +26,46 @@ from functools import ( import inspect import platform +import pytest import tractor import trio def tractor_test(fn): ''' - Decorator for async test funcs to present them as "native" - looking sync funcs runnable by `pytest` using `trio.run()`. + Decorator for async test fns to decorator-wrap them as "native" + looking sync funcs runnable by `pytest` and auto invoked with + `trio.run()` (much like the `pytest-trio` plugin's approach). - Use: + Further the test fn body will be invoked AFTER booting the actor + runtime, i.e. from inside a `tractor.open_root_actor()` block AND + with various runtime and tooling parameters implicitly passed as + requested by by the test session's config; see immediately below. - @tractor_test - async def test_whatever(): - await ... + Basic deco use: + --------------- - If fixtures: + @tractor_test + async def test_whatever(): + await ... - - ``reg_addr`` (a socket addr tuple where arbiter is listening) - - ``loglevel`` (logging level passed to tractor internals) - - ``start_method`` (subprocess spawning backend) - are defined in the `pytest` fixture space they will be automatically - injected to tests declaring these funcargs. + Runtime config via special fixtures: + ------------------------------------ + If any of the following fixture are requested by the wrapped test + fn (via normal func-args declaration), + + - `reg_addr` (a socket addr tuple where arbiter is listening) + - `loglevel` (logging level passed to tractor internals) + - `start_method` (subprocess spawning backend) + + (TODO support) + - `tpt_proto` (IPC transport protocol key) + + they will be automatically injected to each test as normally + expected as well as passed to the initial + `tractor.open_root_actor()` funcargs. + ''' @wraps(fn) def wrapper( @@ -111,3 +128,164 @@ def tractor_test(fn): return trio.run(main) return wrapper + + +def pytest_addoption( + parser: pytest.Parser, +): + # parser.addoption( + # "--ll", + # action="store", + # dest='loglevel', + # default='ERROR', help="logging level to set when testing" + # ) + + parser.addoption( + "--spawn-backend", + action="store", + dest='spawn_backend', + default='trio', + help="Processing spawning backend to use for test run", + ) + + parser.addoption( + "--tpdb", + "--debug-mode", + action="store_true", + dest='tractor_debug_mode', + # default=False, + help=( + 'Enable a flag that can be used by tests to to set the ' + '`debug_mode: bool` for engaging the internal ' + 'multi-proc debugger sys.' + ), + ) + + # provide which IPC transport protocols opting-in test suites + # should accumulatively run against. + parser.addoption( + "--tpt-proto", + nargs='+', # accumulate-multiple-args + action="store", + dest='tpt_protos', + default=['tcp'], + help="Transport protocol to use under the `tractor.ipc.Channel`", + ) + + +def pytest_configure(config): + backend = config.option.spawn_backend + tractor._spawn.try_set_start_method(backend) + + +@pytest.fixture(scope='session') +def debug_mode(request) -> bool: + ''' + Flag state for whether `--tpdb` (for `tractor`-py-debugger) + was passed to the test run. + + Normally tests should pass this directly to `.open_root_actor()` + to allow the user to opt into suite-wide crash handling. 
+ + ''' + debug_mode: bool = request.config.option.tractor_debug_mode + return debug_mode + + +@pytest.fixture(scope='session') +def spawn_backend(request) -> str: + return request.config.option.spawn_backend + + +@pytest.fixture(scope='session') +def tpt_protos(request) -> list[str]: + + # allow quoting on CLI + proto_keys: list[str] = [ + proto_key.replace('"', '').replace("'", "") + for proto_key in request.config.option.tpt_protos + ] + + # ?TODO, eventually support multiple protos per test-sesh? + if len(proto_keys) > 1: + pytest.fail( + 'We only support one `--tpt-proto ` atm!\n' + ) + + # XXX ensure we support the protocol by name via lookup! + for proto_key in proto_keys: + addr_type = tractor._addr._address_types[proto_key] + assert addr_type.proto_key == proto_key + + yield proto_keys + + +@pytest.fixture( + scope='session', + autouse=True, +) +def tpt_proto( + tpt_protos: list[str], +) -> str: + proto_key: str = tpt_protos[0] + + from tractor import _state + if _state._def_tpt_proto != proto_key: + _state._def_tpt_proto = proto_key + + yield proto_key + + +@pytest.fixture(scope='session') +def reg_addr( + tpt_proto: str, +) -> tuple[str, int|str]: + ''' + Deliver a test-sesh unique registry address such + that each run's (tests which use this fixture) will + have no conflicts/cross-talk when running simultaneously + nor will interfere with other live `tractor` apps active + on the same network-host (namespace). + + ''' + from tractor._testing.addr import get_rando_addr + return get_rando_addr( + tpt_proto=tpt_proto, + ) + + +def pytest_generate_tests( + metafunc: pytest.Metafunc, +): + spawn_backend: str = metafunc.config.option.spawn_backend + + if not spawn_backend: + # XXX some weird windows bug with `pytest`? + spawn_backend = 'trio' + + # TODO: maybe just use the literal `._spawn.SpawnMethodKey`? + assert spawn_backend in ( + 'mp_spawn', + 'mp_forkserver', + 'trio', + ) + + # NOTE: used-to-be-used-to dyanmically parametrize tests for when + # you just passed --spawn-backend=`mp` on the cli, but now we expect + # that cli input to be manually specified, BUT, maybe we'll do + # something like this again in the future? + if 'start_method' in metafunc.fixturenames: + metafunc.parametrize( + "start_method", + [spawn_backend], + scope='module', + ) + + # TODO, parametrize any `tpt_proto: str` declaring tests! + # proto_tpts: list[str] = metafunc.config.option.proto_tpts + # if 'tpt_proto' in metafunc.fixturenames: + # metafunc.parametrize( + # 'tpt_proto', + # proto_tpts, # TODO, double check this list usage! 
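# ---- hedged usage sketch (editor's illustration, not from the patch) ----
# (the commented-out `tpt_proto` parametrization continues just below.)
# A possible test consuming the session fixtures defined in this module:
# `reg_addr`, `tpt_proto` and `debug_mode` are the fixtures added above,
# and the kwargs are forwarded to `tractor.open_root_actor()` as the
# fixture docstrings suggest; the exact `registry_addrs` kwarg name is an
# assumption and the test body is made up.
import tractor
import trio

def test_root_boots(
    reg_addr: tuple[str, int | str],
    tpt_proto: str,
    debug_mode: bool,
):
    async def main() -> None:
        async with tractor.open_root_actor(
            registry_addrs=[reg_addr],        # session-unique registry addr
            enable_transports=[tpt_proto],    # eg. 'tcp' or 'uds'
            debug_mode=debug_mode,            # from the `--tpdb` CLI flag
        ):
            await trio.sleep(0)  # runtime came up; nothing else to verify

    trio.run(main)
# --------------------------------------------------------------------------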
+ # scope='module', + # ) diff --git a/tractor/_testing/samples.py b/tractor/_testing/samples.py new file mode 100644 index 00000000..a87a22c4 --- /dev/null +++ b/tractor/_testing/samples.py @@ -0,0 +1,35 @@ +import os +import random + + +def generate_sample_messages( + amount: int, + rand_min: int = 0, + rand_max: int = 0, + silent: bool = False +) -> tuple[list[bytes], int]: + + msgs = [] + size = 0 + + if not silent: + print(f'\ngenerating {amount} messages...') + + for i in range(amount): + msg = f'[{i:08}]'.encode('utf-8') + + if rand_max > 0: + msg += os.urandom( + random.randint(rand_min, rand_max)) + + size += len(msg) + + msgs.append(msg) + + if not silent and i and i % 10_000 == 0: + print(f'{i} generated') + + if not silent: + print(f'done, {size:,} bytes in total') + + return msgs, size diff --git a/tractor/devx/__init__.py b/tractor/devx/__init__.py index 7047dbdb..80c6744f 100644 --- a/tractor/devx/__init__.py +++ b/tractor/devx/__init__.py @@ -20,7 +20,7 @@ Runtime "developer experience" utils and addons to aid our and working with/on the actor runtime. """ -from ._debug import ( +from .debug import ( maybe_wait_for_debugger as maybe_wait_for_debugger, acquire_debug_lock as acquire_debug_lock, breakpoint as breakpoint, diff --git a/tractor/devx/_debug.py b/tractor/devx/_debug.py deleted file mode 100644 index c6ca1d89..00000000 --- a/tractor/devx/_debug.py +++ /dev/null @@ -1,3303 +0,0 @@ -# tractor: structured concurrent "actors". -# Copyright 2018-eternity Tyler Goodlet. - -# This program is free software: you can redistribute it and/or -# modify it under the terms of the GNU Affero General Public License -# as published by the Free Software Foundation, either version 3 of -# the License, or (at your option) any later version. - -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Affero General Public License for more details. - -# You should have received a copy of the GNU Affero General Public -# License along with this program. If not, see -# . - -""" -Multi-core debugging for da peeps! 
- -""" -from __future__ import annotations -import asyncio -import bdb -from contextlib import ( - asynccontextmanager as acm, - contextmanager as cm, - nullcontext, - _GeneratorContextManager, - _AsyncGeneratorContextManager, -) -from functools import ( - partial, - cached_property, -) -import inspect -import os -import signal -import sys -import textwrap -import threading -import traceback -from typing import ( - Any, - Callable, - AsyncIterator, - AsyncGenerator, - TypeAlias, - TYPE_CHECKING, -) -from types import ( - FunctionType, - FrameType, - ModuleType, - TracebackType, - CodeType, -) - -from msgspec import Struct -import pdbp -import sniffio -import trio -from trio import CancelScope -from trio.lowlevel import ( - current_task, -) -from trio import ( - TaskStatus, -) -import tractor -from tractor.to_asyncio import run_trio_task_in_future -from tractor.log import get_logger -from tractor._context import Context -from tractor import _state -from tractor._exceptions import ( - InternalError, - NoRuntime, - is_multi_cancelled, -) -from tractor._state import ( - current_actor, - is_root_process, - debug_mode, - current_ipc_ctx, -) -# from .pformat import ( -# pformat_caller_frame, -# pformat_cs, -# ) - -if TYPE_CHECKING: - from trio.lowlevel import Task - from threading import Thread - from tractor._ipc import Channel - from tractor._runtime import ( - Actor, - ) - -log = get_logger(__name__) - -# TODO: refine the internal impl and APIs in this module! -# -# -[ ] rework `._pause()` and it's branch-cases for root vs. -# subactor: -# -[ ] `._pause_from_root()` + `_pause_from_subactor()`? -# -[ ] do the de-factor based on bg-thread usage in -# `.pause_from_sync()` & `_pause_from_bg_root_thread()`. -# -[ ] drop `debug_func == None` case which is confusing af.. -# -[ ] factor out `_enter_repl_sync()` into a util func for calling -# the `_set_trace()` / `_post_mortem()` APIs? -# -# -[ ] figure out if we need `acquire_debug_lock()` and/or re-implement -# it as part of the `.pause_from_sync()` rework per above? -# -# -[ ] pair the `._pause_from_subactor()` impl with a "debug nursery" -# that's dynamically allocated inside the `._rpc` task thus -# avoiding the `._service_n.start()` usage for the IPC request? -# -[ ] see the TODO inside `._rpc._errors_relayed_via_ipc()` -# -# -[ ] impl a `open_debug_request()` which encaps all -# `request_root_stdio_lock()` task scheduling deats -# + `DebugStatus` state mgmt; which should prolly be re-branded as -# a `DebugRequest` type anyway AND with suppoort for bg-thread -# (from root actor) usage? -# -# -[ ] handle the `xonsh` case for bg-root-threads in the SIGINT -# handler! -# -[ ] do we need to do the same for subactors? -# -[ ] make the failing tests finally pass XD -# -# -[ ] simplify `maybe_wait_for_debugger()` to be a root-task only -# API? -# -[ ] currently it's implemented as that so might as well make it -# formal? - - -def hide_runtime_frames() -> dict[FunctionType, CodeType]: - ''' - Hide call-stack frames for various std-lib and `trio`-API primitives - such that the tracebacks presented from our runtime are as minimized - as possible, particularly from inside a `PdbREPL`. - - ''' - # XXX HACKZONE XXX - # hide exit stack frames on nurseries and cancel-scopes! - # |_ so avoid seeing it when the `pdbp` REPL is first engaged from - # inside a `trio.open_nursery()` scope (with no line after it - # in before the block end??). 
- # - # TODO: FINALLY got this workin originally with - # `@pdbp.hideframe` around the `wrapper()` def embedded inside - # `_ki_protection_decoratior()`.. which is in the module: - # /home/goodboy/.virtualenvs/tractor311/lib/python3.11/site-packages/trio/_core/_ki.py - # - # -[ ] make an issue and patch for `trio` core? maybe linked - # to the long outstanding `pdb` one below? - # |_ it's funny that there's frame hiding throughout `._run.py` - # but not where it matters on the below exit funcs.. - # - # -[ ] provide a patchset for the lonstanding - # |_ https://github.com/python-trio/trio/issues/1155 - # - # -[ ] make a linked issue to ^ and propose allowing all the - # `._core._run` code to have their `__tracebackhide__` value - # configurable by a `RunVar` to allow getting scheduler frames - # if desired through configuration? - # - # -[ ] maybe dig into the core `pdb` issue why the extra frame is shown - # at all? - # - funcs: list[FunctionType] = [ - trio._core._run.NurseryManager.__aexit__, - trio._core._run.CancelScope.__exit__, - _GeneratorContextManager.__exit__, - _AsyncGeneratorContextManager.__aexit__, - _AsyncGeneratorContextManager.__aenter__, - trio.Event.wait, - ] - func_list_str: str = textwrap.indent( - "\n".join(f.__qualname__ for f in funcs), - prefix=' |_ ', - ) - log.devx( - 'Hiding the following runtime frames by default:\n' - f'{func_list_str}\n' - ) - - codes: dict[FunctionType, CodeType] = {} - for ref in funcs: - # stash a pre-modified version of each ref's code-obj - # so it can be reverted later if needed. - codes[ref] = ref.__code__ - pdbp.hideframe(ref) - # - # pdbp.hideframe(trio._core._run.NurseryManager.__aexit__) - # pdbp.hideframe(trio._core._run.CancelScope.__exit__) - # pdbp.hideframe(_GeneratorContextManager.__exit__) - # pdbp.hideframe(_AsyncGeneratorContextManager.__aexit__) - # pdbp.hideframe(_AsyncGeneratorContextManager.__aenter__) - # pdbp.hideframe(trio.Event.wait) - return codes - - -class LockStatus( - Struct, - tag=True, - tag_field='msg_type', -): - subactor_uid: tuple[str, str] - cid: str - locked: bool - - -class LockRelease( - Struct, - tag=True, - tag_field='msg_type', -): - subactor_uid: tuple[str, str] - cid: str - - -__pld_spec__: TypeAlias = LockStatus|LockRelease - - -# TODO: instantiate this only in root from factory -# so as to allow runtime errors from subactors. -class Lock: - ''' - Actor-tree-global debug lock state, exists only in a root process. - - Mostly to avoid a lot of global declarations for now XD. - - ''' - @staticmethod - def get_locking_task_cs() -> CancelScope|None: - if not is_root_process(): - raise RuntimeError( - '`Lock.locking_task_cs` is invalid in subactors!' - ) - - if ctx := Lock.ctx_in_debug: - return ctx._scope - - return None - - # TODO: once we convert to singleton-per-actor-style - # @property - # def stats(cls) -> trio.LockStatistics: - # return cls._debug_lock.statistics() - - # @property - # def owner(cls) -> Task: - # return cls._debug_lock.statistics().owner - - # ROOT ONLY - # ------ - ------- - # the root-actor-ONLY singletons for, - # - # - the uid of the actor who's task is using a REPL - # - a literal task-lock, - # - a shielded-cancel-scope around the acquiring task*, - # - a broadcast event to signal no-actor using a REPL in tree, - # - a filter list to block subs-by-uid from locking. - # - # * in case it needs to be manually cancelled in root due to - # a stale lock condition (eg. 
IPC failure with the locking - # child - ctx_in_debug: Context|None = None - req_handler_finished: trio.Event|None = None - - _owned_by_root: bool = False - _debug_lock: trio.StrictFIFOLock = trio.StrictFIFOLock() - _blocked: set[ - tuple[str, str] # `Actor.uid` for per actor - |str # Context.cid for per task - ] = set() - - @classmethod - def repr(cls) -> str: - lock_stats: trio.LockStatistics = cls._debug_lock.statistics() - req: trio.Event|None = cls.req_handler_finished - fields: str = ( - f'|_ ._blocked: {cls._blocked}\n' - f'|_ ._debug_lock: {cls._debug_lock}\n' - f' {lock_stats}\n\n' - - f'|_ .ctx_in_debug: {cls.ctx_in_debug}\n' - f'|_ .req_handler_finished: {req}\n' - ) - if req: - req_stats: trio.EventStatistics = req.statistics() - fields += f' {req_stats}\n' - - body: str = textwrap.indent( - fields, - prefix=' ', - ) - return ( - f'<{cls.__name__}(\n' - f'{body}' - ')>\n\n' - ) - - @classmethod - # @pdbp.hideframe - def release( - cls, - raise_on_thread: bool = True, - - ) -> bool: - ''' - Release the actor-tree global TTY stdio lock (only) from the - `trio.run()`-main-thread. - - ''' - we_released: bool = False - ctx_in_debug: Context|None = cls.ctx_in_debug - repl_task: Task|Thread|None = DebugStatus.repl_task - try: - if not DebugStatus.is_main_trio_thread(): - thread: threading.Thread = threading.current_thread() - message: str = ( - '`Lock.release()` can not be called from a non-main-`trio` thread!\n' - f'{thread}\n' - ) - if raise_on_thread: - raise RuntimeError(message) - - log.devx(message) - return False - - task: Task = current_task() - message: str = ( - 'TTY NOT RELEASED on behalf of caller\n' - f'|_{task}\n' - ) - - # sanity check that if we're the root actor - # the lock is marked as such. - # note the pre-release value may be diff the the - # post-release task. - if repl_task is task: - assert cls._owned_by_root - message: str = ( - 'TTY lock held by root-actor on behalf of local task\n' - f'|_{repl_task}\n' - ) - else: - assert DebugStatus.repl_task is not task - - lock: trio.StrictFIFOLock = cls._debug_lock - owner: Task = lock.statistics().owner - if ( - lock.locked() - and - (owner is task) - # ^-NOTE-^ if we do NOT ensure this, `trio` will - # raise a RTE when a non-owner tries to releasee the - # lock. - # - # Further we need to be extra pedantic about the - # correct task, greenback-spawned-task and/or thread - # being set to the `.repl_task` such that the above - # condition matches and we actually release the lock. - # - # This is particular of note from `.pause_from_sync()`! - ): - cls._debug_lock.release() - we_released: bool = True - if repl_task: - message: str = ( - 'TTY released on behalf of root-actor-local REPL owner\n' - f'|_{repl_task}\n' - ) - else: - message: str = ( - 'TTY released by us on behalf of remote peer?\n' - f'{ctx_in_debug}\n' - ) - - except RuntimeError as rte: - log.exception( - 'Failed to release `Lock._debug_lock: trio.FIFOLock`?\n' - ) - raise rte - - finally: - # IFF there are no more requesting tasks queued up fire, the - # "tty-unlocked" event thereby alerting any monitors of the lock that - # we are now back in the "tty unlocked" state. This is basically - # and edge triggered signal around an empty queue of sub-actor - # tasks that may have tried to acquire the lock. 
- lock_stats: trio.LockStatistics = cls._debug_lock.statistics() - req_handler_finished: trio.Event|None = Lock.req_handler_finished - if ( - not lock_stats.owner - and - req_handler_finished is None - ): - message += ( - '-> No new task holds the TTY lock!\n\n' - f'{Lock.repr()}\n' - ) - - elif ( - req_handler_finished # new IPC ctx debug request active - and - lock.locked() # someone has the lock - ): - behalf_of_task = ( - ctx_in_debug - or - repl_task - ) - message += ( - f'A non-caller task still owns this lock on behalf of\n' - f'{behalf_of_task}\n' - f'lock owner task: {lock_stats.owner}\n' - ) - - if ( - we_released - and - ctx_in_debug - ): - cls.ctx_in_debug = None # unset - - # post-release value (should be diff then value above!) - repl_task: Task|Thread|None = DebugStatus.repl_task - if ( - cls._owned_by_root - and - we_released - ): - cls._owned_by_root = False - - if task is not repl_task: - message += ( - 'Lock released by root actor on behalf of bg thread\n' - f'|_{repl_task}\n' - ) - - if message: - log.devx(message) - - return we_released - - @classmethod - @acm - async def acquire_for_ctx( - cls, - ctx: Context, - - ) -> AsyncIterator[trio.StrictFIFOLock]: - ''' - Acquire a root-actor local FIFO lock which tracks mutex access of - the process tree's global debugger breakpoint. - - This lock avoids tty clobbering (by preventing multiple processes - reading from stdstreams) and ensures multi-actor, sequential access - to the ``pdb`` repl. - - ''' - if not is_root_process(): - raise RuntimeError('Only callable by a root actor task!') - - # subactor_uid: tuple[str, str] = ctx.chan.uid - we_acquired: bool = False - log.runtime( - f'Attempting to acquire TTY lock for sub-actor\n' - f'{ctx}' - ) - try: - pre_msg: str = ( - f'Entering lock checkpoint for sub-actor\n' - f'{ctx}' - ) - stats = cls._debug_lock.statistics() - if owner := stats.owner: - pre_msg += ( - f'\n' - f'`Lock` already held by local task?\n' - f'{owner}\n\n' - # f'On behalf of task: {cls.remote_task_in_debug!r}\n' - f'On behalf of IPC ctx\n' - f'{ctx}' - ) - log.runtime(pre_msg) - - # NOTE: if the surrounding cancel scope from the - # `lock_stdio_for_peer()` caller is cancelled, this line should - # unblock and NOT leave us in some kind of - # a "child-locked-TTY-but-child-is-uncontactable-over-IPC" - # condition. - await cls._debug_lock.acquire() - cls.ctx_in_debug = ctx - we_acquired = True - - log.runtime( - f'TTY lock acquired for sub-actor\n' - f'{ctx}' - ) - - # NOTE: critical section: this yield is unshielded! - # - # IF we received a cancel during the shielded lock entry of some - # next-in-queue requesting task, then the resumption here will - # result in that ``trio.Cancelled`` being raised to our caller - # (likely from `lock_stdio_for_peer()` below)! In - # this case the ``finally:`` below should trigger and the - # surrounding caller side context should cancel normally - # relaying back to the caller. 
- - yield cls._debug_lock - - finally: - message :str = 'Exiting `Lock.acquire_for_ctx()` on behalf of sub-actor\n' - if we_acquired: - cls.release() - message += '-> TTY lock released by child\n' - - else: - message += '-> TTY lock never acquired by child??\n' - - log.runtime( - f'{message}\n' - f'{ctx}' - ) - - -def get_lock() -> Lock: - return Lock - - -@tractor.context( - # enable the locking msgspec - pld_spec=__pld_spec__, -) -async def lock_stdio_for_peer( - ctx: Context, - subactor_task_uid: tuple[str, int], - -) -> LockStatus|LockRelease: - ''' - Lock the TTY in the root process of an actor tree in a new - inter-actor-context-task such that the ``pdbp`` debugger console - can be mutex-allocated to the calling sub-actor for REPL control - without interference by other processes / threads. - - NOTE: this task must be invoked in the root process of the actor - tree. It is meant to be invoked as an rpc-task and should be - highly reliable at releasing the mutex complete! - - ''' - subactor_uid: tuple[str, str] = ctx.chan.uid - - # mark the tty lock as being in use so that the runtime - # can try to avoid clobbering any connection from a child - # that's currently relying on it. - we_finished = Lock.req_handler_finished = trio.Event() - lock_blocked: bool = False - try: - if ctx.cid in Lock._blocked: - raise RuntimeError( - f'Double lock request!?\n' - f'The same remote task already has an active request for TTY lock ??\n\n' - f'subactor uid: {subactor_uid}\n\n' - - 'This might be mean that the requesting task ' - 'in `request_root_stdio_lock()` may have crashed?\n' - 'Consider that an internal bug exists given the TTY ' - '`Lock`ing IPC dialog..\n' - ) - Lock._blocked.add(ctx.cid) - lock_blocked = True - root_task_name: str = current_task().name - if tuple(subactor_uid) in Lock._blocked: - log.warning( - f'Subactor is blocked from acquiring debug lock..\n' - f'subactor_uid: {subactor_uid}\n' - f'remote task: {subactor_task_uid}\n' - ) - ctx._enter_debugger_on_cancel: bool = False - message: str = ( - f'Debug lock blocked for subactor\n\n' - f'x)<= {subactor_uid}\n\n' - - f'Likely because the root actor already started shutdown and is ' - 'closing IPC connections for this child!\n\n' - 'Cancelling debug request!\n' - ) - log.cancel(message) - await ctx.cancel() - raise DebugRequestError(message) - - log.devx( - 'Subactor attempting to acquire TTY lock\n' - f'root task: {root_task_name}\n' - f'subactor_uid: {subactor_uid}\n' - f'remote task: {subactor_task_uid}\n' - ) - DebugStatus.shield_sigint() - - # NOTE: we use the IPC ctx's cancel scope directly in order to - # ensure that on any transport failure, or cancellation request - # from the child we expect - # `Context._maybe_cancel_and_set_remote_error()` to cancel this - # scope despite the shielding we apply below. 
- debug_lock_cs: CancelScope = ctx._scope - - async with Lock.acquire_for_ctx(ctx=ctx): - debug_lock_cs.shield = True - - log.devx( - 'Subactor acquired debugger request lock!\n' - f'root task: {root_task_name}\n' - f'subactor_uid: {subactor_uid}\n' - f'remote task: {subactor_task_uid}\n\n' - - 'Sending `ctx.started(LockStatus)`..\n' - - ) - - # indicate to child that we've locked stdio - await ctx.started( - LockStatus( - subactor_uid=subactor_uid, - cid=ctx.cid, - locked=True, - ) - ) - - log.devx( - f'Actor {subactor_uid} acquired `Lock` via debugger request' - ) - - # wait for unlock pdb by child - async with ctx.open_stream() as stream: - release_msg: LockRelease = await stream.receive() - - # TODO: security around only releasing if - # these match? - log.devx( - f'TTY lock released requested\n\n' - f'{release_msg}\n' - ) - assert release_msg.cid == ctx.cid - assert release_msg.subactor_uid == tuple(subactor_uid) - - log.devx( - f'Actor {subactor_uid} released TTY lock' - ) - - return LockStatus( - subactor_uid=subactor_uid, - cid=ctx.cid, - locked=False, - ) - - except BaseException as req_err: - fail_reason: str = ( - f'on behalf of peer\n\n' - f'x)<=\n' - f' |_{subactor_task_uid!r}@{ctx.chan.uid!r}\n' - f'\n' - 'Forcing `Lock.release()` due to acquire failure!\n\n' - f'x)=>\n' - f' {ctx}' - ) - if isinstance(req_err, trio.Cancelled): - fail_reason = ( - 'Cancelled during stdio-mutex request ' - + - fail_reason - ) - else: - fail_reason = ( - 'Failed to deliver stdio-mutex request ' - + - fail_reason - ) - - log.exception(fail_reason) - Lock.release() - raise - - finally: - if lock_blocked: - Lock._blocked.remove(ctx.cid) - - # wakeup any waiters since the lock was (presumably) - # released, possibly only temporarily. - we_finished.set() - DebugStatus.unshield_sigint() - - -class DebugStateError(InternalError): - ''' - Something inconsistent or unexpected happend with a sub-actor's - debug mutex request to the root actor. - - ''' - - -# TODO: rename to ReplState or somethin? -# DebugRequest, make it a singleton instance? -class DebugStatus: - ''' - Singleton-state for debugging machinery in a subactor. - - Composes conc primitives for syncing with a root actor to - acquire the tree-global (TTY) `Lock` such that only ever one - actor's task can have the REPL active at a given time. - - Methods to shield the process' `SIGINT` handler are used - whenever a local task is an active REPL. - - ''' - # XXX local ref to the `pdbp.Pbp` instance, ONLY set in the - # actor-process that currently has activated a REPL i.e. it - # should be `None` (unset) in any other actor-process that does - # not yet have the `Lock` acquired via a root-actor debugger - # request. - repl: PdbREPL|None = None - - # TODO: yet again this looks like a task outcome where we need - # to sync to the completion of one task (and get its result) - # being used everywhere for syncing.. - # -[ ] see if we can get our proto oco task-mngr to work for - # this? - repl_task: Task|None = None - # repl_thread: Thread|None = None - # ^TODO? 
- - repl_release: trio.Event|None = None - - req_task: Task|None = None - req_ctx: Context|None = None - req_cs: CancelScope|None = None - req_finished: trio.Event|None = None - req_err: BaseException|None = None - - lock_status: LockStatus|None = None - - _orig_sigint_handler: Callable|None = None - _trio_handler: ( - Callable[[int, FrameType|None], Any] - |int - | None - ) = None - - @classmethod - def repr(cls) -> str: - fields: str = ( - f'repl: {cls.repl}\n' - f'repl_task: {cls.repl_task}\n' - f'repl_release: {cls.repl_release}\n' - f'req_ctx: {cls.req_ctx}\n' - ) - body: str = textwrap.indent( - fields, - prefix=' |_', - ) - return ( - f'<{cls.__name__}(\n' - f'{body}' - ')>' - ) - - # TODO: how do you get this to work on a non-inited class? - # __repr__ = classmethod(repr) - # __str__ = classmethod(repr) - - @classmethod - def shield_sigint(cls): - ''' - Shield out SIGINT handling (which by default triggers - `Task` cancellation) in subactors when a `pdb` REPL - is active. - - Avoids cancellation of the current actor (task) when the user - mistakenly sends ctl-c or via a recevied signal (from an - external request). Explicit runtime cancel requests are - allowed until the current REPL-session (the blocking call - `Pdb.interaction()`) exits, normally via the 'continue' or - 'quit' command - at which point the orig SIGINT handler is - restored via `.unshield_sigint()` below. - - Impl notes: - ----------- - - we prefer that `trio`'s default handler is always used when - SIGINT is unshielded (hence disabling the `pdb.Pdb` - defaults in `mk_pdb()`) such that reliable KBI cancellation - is always enforced. - - - we always detect whether we're running from a non-main - thread, in which case schedule the SIGINT shielding override - to in the main thread as per, - - https://docs.python.org/3/library/signal.html#signals-and-threads - - ''' - # - # XXX detect whether we're running from a non-main thread - # in which case schedule the SIGINT shielding override - # to in the main thread. - # https://docs.python.org/3/library/signal.html#signals-and-threads - if ( - not cls.is_main_trio_thread() - and - not _state._runtime_vars.get( - '_is_infected_aio', - False, - ) - ): - cls._orig_sigint_handler: Callable = trio.from_thread.run_sync( - signal.signal, - signal.SIGINT, - sigint_shield, - ) - - else: - cls._orig_sigint_handler = signal.signal( - signal.SIGINT, - sigint_shield, - ) - - @classmethod - @pdbp.hideframe # XXX NOTE XXX see below in `.pause_from_sync()` - def unshield_sigint(cls): - ''' - Un-shield SIGINT for REPL-active (su)bactor. - - See details in `.shield_sigint()`. - - ''' - # always restore ``trio``'s sigint handler. see notes below in - # the pdb factory about the nightmare that is that code swapping - # out the handler when the repl activates... - # if not cls.is_main_trio_thread(): - if ( - not cls.is_main_trio_thread() - and - not _state._runtime_vars.get( - '_is_infected_aio', - False, - ) - # not current_actor().is_infected_aio() - # ^XXX, since for bg-thr case will always raise.. 
- ): - trio.from_thread.run_sync( - signal.signal, - signal.SIGINT, - cls._trio_handler, - ) - else: - trio_h: Callable = cls._trio_handler - # XXX should never really happen XXX - if not trio_h: - mk_pdb().set_trace() - - signal.signal( - signal.SIGINT, - cls._trio_handler, - ) - - cls._orig_sigint_handler = None - - @classmethod - def is_main_trio_thread(cls) -> bool: - ''' - Check if we're the "main" thread (as in the first one - started by cpython) AND that it is ALSO the thread that - called `trio.run()` and not some thread spawned with - `trio.to_thread.run_sync()`. - - ''' - try: - async_lib: str = sniffio.current_async_library() - except sniffio.AsyncLibraryNotFoundError: - async_lib = None - - is_main_thread: bool = trio._util.is_main_thread() - # ^TODO, since this is private, @oremanj says - # we should just copy the impl for now..? - if is_main_thread: - thread_name: str = 'main' - else: - thread_name: str = threading.current_thread().name - - is_trio_main = ( - is_main_thread - and - (async_lib == 'trio') - ) - - report: str = f'Running thread: {thread_name!r}\n' - if async_lib: - report += ( - f'Current async-lib detected by `sniffio`: {async_lib}\n' - ) - else: - report += ( - 'No async-lib detected (by `sniffio`) ??\n' - ) - if not is_trio_main: - log.warning(report) - - return is_trio_main - # XXX apparently unreliable..see ^ - # ( - # threading.current_thread() - # is not threading.main_thread() - # ) - - @classmethod - def cancel(cls) -> bool: - if (req_cs := cls.req_cs): - req_cs.cancel() - return True - - return False - - @classmethod - # @pdbp.hideframe - def release( - cls, - cancel_req_task: bool = False, - ): - repl_release: trio.Event = cls.repl_release - try: - # sometimes the task might already be terminated in - # which case this call will raise an RTE? - # See below for reporting on that.. - if ( - repl_release is not None - and - not repl_release.is_set() - ): - if cls.is_main_trio_thread(): - repl_release.set() - - elif ( - _state._runtime_vars.get( - '_is_infected_aio', - False, - ) - # ^XXX, again bc we need to not except - # but for bg-thread case it will always raise.. - # - # TODO, is there a better api then using - # `err_on_no_runtime=False` in the below? - # current_actor().is_infected_aio() - ): - async def _set_repl_release(): - repl_release.set() - - fute: asyncio.Future = run_trio_task_in_future( - _set_repl_release - ) - if not fute.done(): - log.warning('REPL release state unknown..?') - - else: - # XXX NOTE ONLY used for bg root-actor sync - # threads, see `.pause_from_sync()`. - trio.from_thread.run_sync( - repl_release.set - ) - - except RuntimeError as rte: - log.exception( - f'Failed to release debug-request ??\n\n' - f'{cls.repr()}\n' - ) - # pdbp.set_trace() - raise rte - - finally: - # if req_ctx := cls.req_ctx: - # req_ctx._scope.cancel() - if cancel_req_task: - cancelled: bool = cls.cancel() - if not cancelled: - log.warning( - 'Failed to cancel request task!?\n' - f'{cls.repl_task}\n' - ) - - # actor-local state, irrelevant for non-root. - cls.repl_task = None - - # XXX WARNING needs very special caughtion, and we should - # prolly make a more explicit `@property` API? - # - # - if unset in root multi-threaded case can cause - # issues with detecting that some root thread is - # using a REPL, - # - # - what benefit is there to unsetting, it's always - # set again for the next task in some actor.. - # only thing would be to avoid in the sigint-handler - # logging when we don't need to? 
- cls.repl = None - - # maybe restore original sigint handler - # XXX requires runtime check to avoid crash! - if current_actor(err_on_no_runtime=False): - cls.unshield_sigint() - - -# TODO: use the new `@lowlevel.singleton` for this! -def get_debug_req() -> DebugStatus|None: - return DebugStatus - - -class TractorConfig(pdbp.DefaultConfig): - ''' - Custom `pdbp` config which tries to use the best tradeoff - between pretty and minimal. - - ''' - use_pygments: bool = True - sticky_by_default: bool = False - enable_hidden_frames: bool = True - - # much thanks @mdmintz for the hot tip! - # fixes line spacing issue when resizing terminal B) - truncate_long_lines: bool = False - - # ------ - ------ - # our own custom config vars mostly - # for syncing with the actor tree's singleton - # TTY `Lock`. - - -class PdbREPL(pdbp.Pdb): - ''' - Add teardown hooks and local state describing any - ongoing TTY `Lock` request dialog. - - ''' - # override the pdbp config with our coolio one - # NOTE: this is only loaded when no `~/.pdbrc` exists - # so we should prolly pass it into the .__init__() instead? - # i dunno, see the `DefaultFactory` and `pdb.Pdb` impls. - DefaultConfig = TractorConfig - - status = DebugStatus - - # NOTE: see details in stdlib's `bdb.py` - # def user_exception(self, frame, exc_info): - # ''' - # Called when we stop on an exception. - # ''' - # log.warning( - # 'Exception during REPL sesh\n\n' - # f'{frame}\n\n' - # f'{exc_info}\n\n' - # ) - - # NOTE: this actually hooks but i don't see anyway to detect - # if an error was caught.. this is why currently we just always - # call `DebugStatus.release` inside `_post_mortem()`. - # def preloop(self): - # print('IN PRELOOP') - # super().preloop() - - # TODO: cleaner re-wrapping of all this? - # -[ ] figure out how to disallow recursive .set_trace() entry - # since that'll cause deadlock for us. - # -[ ] maybe a `@cm` to call `super().()`? - # -[ ] look at hooking into the `pp` hook specially with our - # own set of pretty-printers? - # * `.pretty_struct.Struct.pformat()` - # * `.pformat(MsgType.pld)` - # * `.pformat(Error.tb_str)`? - # * .. maybe more? - # - def set_continue(self): - try: - super().set_continue() - finally: - # NOTE: for subactors the stdio lock is released via the - # allocated RPC locker task, so for root we have to do it - # manually. - if ( - is_root_process() - and - Lock._debug_lock.locked() - and - DebugStatus.is_main_trio_thread() - ): - # Lock.release(raise_on_thread=False) - Lock.release() - - # XXX AFTER `Lock.release()` for root local repl usage - DebugStatus.release() - - def set_quit(self): - try: - super().set_quit() - finally: - if ( - is_root_process() - and - Lock._debug_lock.locked() - and - DebugStatus.is_main_trio_thread() - ): - # Lock.release(raise_on_thread=False) - Lock.release() - - # XXX after `Lock.release()` for root local repl usage - DebugStatus.release() - - # XXX NOTE: we only override this because apparently the stdlib pdb - # bois likes to touch the SIGINT handler as much as i like to touch - # my d$%&. - def _cmdloop(self): - self.cmdloop() - - @cached_property - def shname(self) -> str | None: - ''' - Attempt to return the login shell name with a special check for - the infamous `xonsh` since it seems to have some issues much - different from std shells when it comes to flushing the prompt? - - ''' - # SUPER HACKY and only really works if `xonsh` is not used - # before spawning further sub-shells.. 
- shpath = os.getenv('SHELL', None) - - if shpath: - if ( - os.getenv('XONSH_LOGIN', default=False) - or 'xonsh' in shpath - ): - return 'xonsh' - - return os.path.basename(shpath) - - return None - - -async def request_root_stdio_lock( - actor_uid: tuple[str, str], - task_uid: tuple[str, int], - - shield: bool = False, - task_status: TaskStatus[CancelScope] = trio.TASK_STATUS_IGNORED, -): - ''' - Connect to the root actor for this actor's process tree and - RPC-invoke a task which acquires the std-streams global `Lock`: - a process-tree-global mutex which prevents multiple actors from - entering `PdbREPL.interaction()` at the same time such that the - parent TTY's stdio is never "clobbered" by simultaneous - reads/writes. - - The actual `Lock` singleton instance exists ONLY in the root - actor's memory space and does nothing more then manage - process-tree global state, - namely a `._debug_lock: trio.FIFOLock`. - - The actual `PdbREPL` interaction/operation is completely isolated - to each sub-actor (process) with the root's `Lock` providing the - multi-process mutex-syncing mechanism to avoid parallel REPL - usage within an actor tree. - - ''' - log.devx( - 'Initing stdio-lock request task with root actor' - ) - # TODO: can we implement this mutex more generally as - # a `._sync.Lock`? - # -[ ] simply add the wrapping needed for the debugger specifics? - # - the `__pld_spec__` impl and maybe better APIs for the client - # vs. server side state tracking? (`Lock` + `DebugStatus`) - # -[ ] for eg. `mp` has a multi-proc lock via the manager - # - https://docs.python.org/3.8/library/multiprocessing.html#synchronization-primitives - # -[ ] technically we need a `RLock` since re-acquire should be a noop - # - https://docs.python.org/3.8/library/multiprocessing.html#multiprocessing.RLock - DebugStatus.req_finished = trio.Event() - DebugStatus.req_task = current_task() - req_err: BaseException|None = None - try: - from tractor._discovery import get_root - # NOTE: we need this to ensure that this task exits - # BEFORE the REPl instance raises an error like - # `bdb.BdbQuit` directly, OW you get a trio cs stack - # corruption! - # Further, the since this task is spawned inside the - # `Context._scope_nursery: trio.Nursery`, once an RPC - # task errors that cs is cancel_called and so if we want - # to debug the TPC task that failed we need to shield - # against that expected `.cancel()` call and instead - # expect all of the `PdbREPL`.set_[continue/quit/]()` - # methods to unblock this task by setting the - # `.repl_release: # trio.Event`. - with trio.CancelScope(shield=shield) as req_cs: - # XXX: was orig for debugging cs stack corruption.. - # log.devx( - # 'Request cancel-scope is:\n\n' - # f'{pformat_cs(req_cs, var_name="req_cs")}\n\n' - # ) - DebugStatus.req_cs = req_cs - req_ctx: Context|None = None - ctx_eg: BaseExceptionGroup|None = None - try: - # TODO: merge into single async with ? - async with get_root() as portal: - async with portal.open_context( - lock_stdio_for_peer, - subactor_task_uid=task_uid, - - # NOTE: set it here in the locker request task bc it's - # possible for multiple such requests for the lock in any - # single sub-actor AND there will be a race between when the - # root locking task delivers the `Started(pld=LockStatus)` - # and when the REPL is actually entered by the requesting - # application task who called - # `.pause()`/`.post_mortem()`. 
- # - # SO, applying the pld-spec here means it is only applied to - # this IPC-ctx request task, NOT any other task(s) - # including the one that actually enters the REPL. This - # is oc desired bc ow the debugged task will msg-type-error. - # pld_spec=__pld_spec__, - - ) as (req_ctx, status): - - DebugStatus.req_ctx = req_ctx - log.devx( - 'Subactor locked TTY with msg\n\n' - f'{status}\n' - ) - - # try: - if (locker := status.subactor_uid) != actor_uid: - raise DebugStateError( - f'Root actor locked by another peer !?\n' - f'locker: {locker!r}\n' - f'actor_uid: {actor_uid}\n' - ) - assert status.cid - # except AttributeError: - # log.exception('failed pldspec asserts!') - # mk_pdb().set_trace() - # raise - - # set last rxed lock dialog status. - DebugStatus.lock_status = status - - async with req_ctx.open_stream() as stream: - task_status.started(req_ctx) - - # wait for local task to exit - # `PdbREPL.interaction()`, normally via - # a `DebugStatus.release()`call, and - # then unblock us here. - await DebugStatus.repl_release.wait() - await stream.send( - LockRelease( - subactor_uid=actor_uid, - cid=status.cid, - ) - ) - - # sync with child-side root locker task - # completion - status: LockStatus = await req_ctx.result() - assert not status.locked - DebugStatus.lock_status = status - - log.devx( - 'TTY lock was released for subactor with msg\n\n' - f'{status}\n\n' - f'Exitting {req_ctx.side!r}-side of locking req_ctx\n' - ) - - except* ( - tractor.ContextCancelled, - trio.Cancelled, - ) as _taskc_eg: - ctx_eg = _taskc_eg - log.cancel( - 'Debug lock request was CANCELLED?\n\n' - f'<=c) {req_ctx}\n' - # f'{pformat_cs(req_cs, var_name="req_cs")}\n\n' - # f'{pformat_cs(req_ctx._scope, var_name="req_ctx._scope")}\n\n' - ) - raise - - except* ( - BaseException, - ) as _ctx_eg: - ctx_eg = _ctx_eg - message: str = ( - 'Failed during debug request dialog with root actor?\n' - ) - if (req_ctx := DebugStatus.req_ctx): - message += ( - f'<=x)\n' - f' |_{req_ctx}\n' - f'Cancelling IPC ctx!\n' - ) - try: - await req_ctx.cancel() - except trio.ClosedResourceError as terr: - ctx_eg.add_note( - # f'Failed with {type(terr)!r} x)> `req_ctx.cancel()` ' - f'Failed with `req_ctx.cancel()` PdbREPL: - ''' - Deliver a new `PdbREPL`: a multi-process safe `pdbp.Pdb`-variant - using the magic of `tractor`'s SC-safe IPC. - - B) - - Our `pdb.Pdb` subtype accomplishes multi-process safe debugging - by: - - - mutexing access to the root process' std-streams (& thus parent - process TTY) via an IPC managed `Lock` singleton per - actor-process tree. - - - temporarily overriding any subactor's SIGINT handler to shield - during live REPL sessions in sub-actors such that cancellation - is never (mistakenly) triggered by a ctrl-c and instead only by - explicit runtime API requests or after the - `pdb.Pdb.interaction()` call has returned. - - FURTHER, the `pdbp.Pdb` instance is configured to be `trio` - "compatible" from a SIGINT handling perspective; we mask out - the default `pdb` handler and instead apply `trio`s default - which mostly addresses all issues described in: - - - https://github.com/python-trio/trio/issues/1155 - - The instance returned from this factory should always be - preferred over the default `pdb[p].set_trace()` whenever using - a `pdb` REPL inside a `trio` based runtime. - - ''' - pdb = PdbREPL() - - # XXX: These are the important flags mentioned in - # https://github.com/python-trio/trio/issues/1155 - # which resolve the traceback spews to console. 
- pdb.allow_kbdint = True - pdb.nosigint = True - return pdb - - -def any_connected_locker_child() -> bool: - ''' - Predicate to determine if a reported child subactor in debug - is actually connected. - - Useful to detect stale `Lock` requests after IPC failure. - - ''' - actor: Actor = current_actor() - - if not is_root_process(): - raise InternalError('This is a root-actor only API!') - - if ( - (ctx := Lock.ctx_in_debug) - and - (uid_in_debug := ctx.chan.uid) - ): - chans: list[tractor.Channel] = actor._peers.get( - tuple(uid_in_debug) - ) - if chans: - return any( - chan.connected() - for chan in chans - ) - - return False - - -_ctlc_ignore_header: str = ( - 'Ignoring SIGINT while debug REPL in use' -) - -def sigint_shield( - signum: int, - frame: 'frame', # type: ignore # noqa - *args, - -) -> None: - ''' - Specialized, debugger-aware SIGINT handler. - - In childred we always ignore/shield for SIGINT to avoid - deadlocks since cancellation should always be managed by the - supervising parent actor. The root actor-proces is always - cancelled on ctrl-c. - - ''' - __tracebackhide__: bool = True - actor: Actor = current_actor() - - def do_cancel(): - # If we haven't tried to cancel the runtime then do that instead - # of raising a KBI (which may non-gracefully destroy - # a ``trio.run()``). - if not actor._cancel_called: - actor.cancel_soon() - - # If the runtime is already cancelled it likely means the user - # hit ctrl-c again because teardown didn't fully take place in - # which case we do the "hard" raising of a local KBI. - else: - raise KeyboardInterrupt - - # only set in the actor actually running the REPL - repl: PdbREPL|None = DebugStatus.repl - - # TODO: maybe we should flatten out all these cases using - # a match/case? - # - # root actor branch that reports whether or not a child - # has locked debugger. - if is_root_process(): - # log.warning( - log.devx( - 'Handling SIGINT in root actor\n' - f'{Lock.repr()}' - f'{DebugStatus.repr()}\n' - ) - # try to see if the supposed (sub)actor in debug still - # has an active connection to *this* actor, and if not - # it's likely they aren't using the TTY lock / debugger - # and we should propagate SIGINT normally. - any_connected: bool = any_connected_locker_child() - - problem = ( - f'root {actor.uid} handling SIGINT\n' - f'any_connected: {any_connected}\n\n' - - f'{Lock.repr()}\n' - ) - - if ( - (ctx := Lock.ctx_in_debug) - and - (uid_in_debug := ctx.chan.uid) # "someone" is (ostensibly) using debug `Lock` - ): - name_in_debug: str = uid_in_debug[0] - assert not repl - # if not repl: # but it's NOT us, the root actor. - # sanity: since no repl ref is set, we def shouldn't - # be the lock owner! - assert name_in_debug != 'root' - - # IDEAL CASE: child has REPL as expected - if any_connected: # there are subactors we can contact - # XXX: only if there is an existing connection to the - # (sub-)actor in debug do we ignore SIGINT in this - # parent! Otherwise we may hang waiting for an actor - # which has already terminated to unlock. - # - # NOTE: don't emit this with `.pdb()` level in - # root without a higher level. - log.runtime( - _ctlc_ignore_header - + - f' by child ' - f'{uid_in_debug}\n' - ) - problem = None - - else: - problem += ( - '\n' - f'A `pdb` REPL is SUPPOSEDLY in use by child {uid_in_debug}\n' - f'BUT, no child actors are IPC contactable!?!?\n' - ) - - # IDEAL CASE: root has REPL as expected - else: - # root actor still has this SIGINT handler active without - # an actor using the `Lock` (a bug state) ?? 
- # => so immediately cancel any stale lock cs and revert - # the handler! - if not DebugStatus.repl: - # TODO: WHEN should we revert back to ``trio`` - # handler if this one is stale? - # -[ ] maybe after a counts work of ctl-c mashes? - # -[ ] use a state var like `stale_handler: bool`? - problem += ( - 'No subactor is using a `pdb` REPL according `Lock.ctx_in_debug`?\n' - 'BUT, the root should be using it, WHY this handler ??\n\n' - 'So either..\n' - '- some root-thread is using it but has no `.repl` set?, OR\n' - '- something else weird is going on outside the runtime!?\n' - ) - else: - # NOTE: since we emit this msg on ctl-c, we should - # also always re-print the prompt the tail block! - log.pdb( - _ctlc_ignore_header - + - f' by root actor..\n' - f'{DebugStatus.repl_task}\n' - f' |_{repl}\n' - ) - problem = None - - # XXX if one is set it means we ARE NOT operating an ideal - # case where a child subactor or us (the root) has the - # lock without any other detected problems. - if problem: - - # detect, report and maybe clear a stale lock request - # cancel scope. - lock_cs: trio.CancelScope = Lock.get_locking_task_cs() - maybe_stale_lock_cs: bool = ( - lock_cs is not None - and not lock_cs.cancel_called - ) - if maybe_stale_lock_cs: - problem += ( - '\n' - 'Stale `Lock.ctx_in_debug._scope: CancelScope` detected?\n' - f'{Lock.ctx_in_debug}\n\n' - - '-> Calling ctx._scope.cancel()!\n' - ) - lock_cs.cancel() - - # TODO: wen do we actually want/need this, see above. - # DebugStatus.unshield_sigint() - log.warning(problem) - - # child actor that has locked the debugger - elif not is_root_process(): - log.debug( - f'Subactor {actor.uid} handling SIGINT\n\n' - f'{Lock.repr()}\n' - ) - - rent_chan: Channel = actor._parent_chan - if ( - rent_chan is None - or - not rent_chan.connected() - ): - log.warning( - 'This sub-actor thinks it is debugging ' - 'but it has no connection to its parent ??\n' - f'{actor.uid}\n' - 'Allowing SIGINT propagation..' - ) - DebugStatus.unshield_sigint() - - repl_task: str|None = DebugStatus.repl_task - req_task: str|None = DebugStatus.req_task - if ( - repl_task - and - repl - ): - log.pdb( - _ctlc_ignore_header - + - f' by local task\n\n' - f'{repl_task}\n' - f' |_{repl}\n' - ) - elif req_task: - log.debug( - _ctlc_ignore_header - + - f' by local request-task and either,\n' - f'- someone else is already REPL-in and has the `Lock`, or\n' - f'- some other local task already is replin?\n\n' - f'{req_task}\n' - ) - - # TODO can we remove this now? - # -[ ] does this path ever get hit any more? - else: - msg: str = ( - 'SIGINT shield handler still active BUT, \n\n' - ) - if repl_task is None: - msg += ( - '- No local task claims to be in debug?\n' - ) - - if repl is None: - msg += ( - '- No local REPL is currently active?\n' - ) - - if req_task is None: - msg += ( - '- No debug request task is active?\n' - ) - - log.warning( - msg - + - 'Reverting handler to `trio` default!\n' - ) - DebugStatus.unshield_sigint() - - # XXX ensure that the reverted-to-handler actually is - # able to rx what should have been **this** KBI ;) - do_cancel() - - # TODO: how to handle the case of an intermediary-child actor - # that **is not** marked in debug mode? See oustanding issue: - # https://github.com/goodboy/tractor/issues/320 - # elif debug_mode(): - - # maybe redraw/print last REPL output to console since - # we want to alert the user that more input is expect since - # nothing has been done dur to ignoring sigint. 
- if ( - DebugStatus.repl # only when current actor has a REPL engaged - ): - flush_status: str = ( - 'Flushing stdout to ensure new prompt line!\n' - ) - - # XXX: yah, mega hack, but how else do we catch this madness XD - if ( - repl.shname == 'xonsh' - ): - flush_status += ( - '-> ALSO re-flushing due to `xonsh`..\n' - ) - repl.stdout.write(repl.prompt) - - # log.warning( - log.devx( - flush_status - ) - repl.stdout.flush() - - # TODO: better console UX to match the current "mode": - # -[ ] for example if in sticky mode where if there is output - # detected as written to the tty we redraw this part underneath - # and erase the past draw of this same bit above? - # repl.sticky = True - # repl._print_if_sticky() - - # also see these links for an approach from `ptk`: - # https://github.com/goodboy/tractor/issues/130#issuecomment-663752040 - # https://github.com/prompt-toolkit/python-prompt-toolkit/blob/c2c6af8a0308f9e5d7c0e28cb8a02963fe0ce07a/prompt_toolkit/patch_stdout.py - else: - log.devx( - # log.warning( - 'Not flushing stdout since not needed?\n' - f'|_{repl}\n' - ) - - # XXX only for tracing this handler - log.devx('exiting SIGINT') - - -_pause_msg: str = 'Opening a pdb REPL in paused actor' - - -class DebugRequestError(RuntimeError): - ''' - Failed to request stdio lock from root actor! - - ''' - - -_repl_fail_msg: str|None = ( - 'Failed to REPl via `_pause()` ' -) - - -async def _pause( - - debug_func: Callable|partial|None, - - # NOTE: must be passed in the `.pause_from_sync()` case! - repl: PdbREPL|None = None, - - # TODO: allow caller to pause despite task cancellation, - # exactly the same as wrapping with: - # with CancelScope(shield=True): - # await pause() - # => the REMAINING ISSUE is that the scope's .__exit__() frame - # is always show in the debugger on entry.. and there seems to - # be no way to override it?.. - # - shield: bool = False, - hide_tb: bool = True, - called_from_sync: bool = False, - called_from_bg_thread: bool = False, - task_status: TaskStatus[ - tuple[Task, PdbREPL], - trio.Event - ] = trio.TASK_STATUS_IGNORED, - **debug_func_kwargs, - -) -> tuple[Task, PdbREPL]|None: - ''' - Inner impl for `pause()` to avoid the `trio.CancelScope.__exit__()` - stack frame when not shielded (since apparently i can't figure out - how to hide it using the normal mechanisms..) - - Hopefully we won't need this in the long run. - - ''' - __tracebackhide__: bool = hide_tb - pause_err: BaseException|None = None - actor: Actor = current_actor() - try: - task: Task = current_task() - except RuntimeError as rte: - # NOTE, 2 cases we might get here: - # - # - ACTUALLY not a `trio.lowlevel.Task` nor runtime caller, - # |_ error out as normal - # - # - an infected `asycio` actor calls it from an actual - # `asyncio.Task` - # |_ in this case we DO NOT want to RTE! - __tracebackhide__: bool = False - if actor.is_infected_aio(): - log.exception( - 'Failed to get current `trio`-task?' - ) - raise RuntimeError( - 'An `asyncio` task should not be calling this!?' - ) from rte - else: - task = asyncio.current_task() - - if debug_func is not None: - debug_func = partial(debug_func) - - # XXX NOTE XXX set it here to avoid ctl-c from cancelling a debug - # request from a subactor BEFORE the REPL is entered by that - # process. - if ( - not repl - and - debug_func - ): - repl: PdbREPL = mk_pdb() - DebugStatus.shield_sigint() - - # TODO: move this into a `open_debug_request()` @acm? 
- # -[ ] prolly makes the most sense to do the request - # task spawn as part of an `@acm` api which delivers the - # `DebugRequest` instance and ensures encapsing all the - # pld-spec and debug-nursery? - # -[ ] maybe make this a `PdbREPL` method or mod func? - # -[ ] factor out better, main reason for it is common logic for - # both root and sub repl entry - def _enter_repl_sync( - debug_func: partial[None], - ) -> None: - __tracebackhide__: bool = hide_tb - debug_func_name: str = ( - debug_func.func.__name__ if debug_func else 'None' - ) - - # TODO: do we want to support using this **just** for the - # locking / common code (prolly to help address #320)? - task_status.started((task, repl)) - try: - if debug_func: - # block here one (at the appropriate frame *up*) where - # ``breakpoint()`` was awaited and begin handling stdio. - log.devx( - 'Entering sync world of the `pdb` REPL for task..\n' - f'{repl}\n' - f' |_{task}\n' - ) - - # set local task on process-global state to avoid - # recurrent entries/requests from the same - # actor-local task. - DebugStatus.repl_task = task - if repl: - DebugStatus.repl = repl - else: - log.error( - 'No REPl instance set before entering `debug_func`?\n' - f'{debug_func}\n' - ) - - # invoke the low-level REPL activation routine which itself - # should call into a `Pdb.set_trace()` of some sort. - debug_func( - repl=repl, - hide_tb=hide_tb, - **debug_func_kwargs, - ) - - # TODO: maybe invert this logic and instead - # do `assert debug_func is None` when - # `called_from_sync`? - else: - if ( - called_from_sync - and - not DebugStatus.is_main_trio_thread() - ): - assert called_from_bg_thread - assert DebugStatus.repl_task is not task - - return (task, repl) - - except trio.Cancelled: - log.exception( - 'Cancelled during invoke of internal\n\n' - f'`debug_func = {debug_func_name}`\n' - ) - # XXX NOTE: DON'T release lock yet - raise - - except BaseException: - __tracebackhide__: bool = False - log.exception( - 'Failed to invoke internal\n\n' - f'`debug_func = {debug_func_name}`\n' - ) - # NOTE: OW this is ONLY called from the - # `.set_continue/next` hooks! - DebugStatus.release(cancel_req_task=True) - - raise - - log.devx( - 'Entering `._pause()` for requesting task\n' - f'|_{task}\n' - ) - - # TODO: this should be created as part of `DebugRequest()` init - # which should instead be a one-shot-use singleton much like - # the `PdbREPL`. - repl_task: Thread|Task|None = DebugStatus.repl_task - if ( - not DebugStatus.repl_release - or - DebugStatus.repl_release.is_set() - ): - log.devx( - 'Setting new `DebugStatus.repl_release: trio.Event` for requesting task\n' - f'|_{task}\n' - ) - DebugStatus.repl_release = trio.Event() - else: - log.devx( - 'Already an existing actor-local REPL user task\n' - f'|_{repl_task}\n' - ) - - # ^-NOTE-^ this must be created BEFORE scheduling any subactor - # debug-req task since it needs to wait on it just after - # `.started()`-ing back its wrapping `.req_cs: CancelScope`. - - repl_err: BaseException|None = None - try: - if is_root_process(): - # we also wait in the root-parent for any child that - # may have the tty locked prior - # TODO: wait, what about multiple root tasks (with bg - # threads) acquiring it though? - ctx: Context|None = Lock.ctx_in_debug - repl_task: Task|None = DebugStatus.repl_task - if ( - ctx is None - and - repl_task is task - # and - # DebugStatus.repl - # ^-NOTE-^ matches for multi-threaded case as well? - ): - # re-entrant root process already has it: noop. 
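# ------------------------------------------------------------------
# Illustrative aside: the actor-local re-entrancy guard modeled by
# `DebugStatus.repl_task` / `.repl_release` above boils down to
# roughly the following; a standalone sketch, not the real impl:
import trio

_owner: trio.lowlevel.Task|None = None
_released: trio.Event = trio.Event()

async def enter_repl_once() -> None:
    global _owner, _released
    this_task = trio.lowlevel.current_task()

    if _owner is this_task:
        # recurrent call from the task already in the REPL: no-op.
        await trio.lowlevel.checkpoint()
        return

    if _owner is not None:
        # some other local task is REPL-ing, wait for it to release.
        await _released.wait()

    _owner = this_task
    _released = trio.Event()
    try:
        ...  # REPL interaction would happen here
    finally:
        _owner = None
        _released.set()
# ------------------------------------------------------------------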
- log.warning( - f'This root actor task is already within an active REPL session\n' - f'Ignoring this recurrent`tractor.pause()` entry\n\n' - f'|_{task}\n' - # TODO: use `._frame_stack` scanner to find the @api_frame - ) - with trio.CancelScope(shield=shield): - await trio.lowlevel.checkpoint() - return (repl, task) - - # elif repl_task: - # log.warning( - # f'This root actor has another task already in REPL\n' - # f'Waitin for the other task to complete..\n\n' - # f'|_{task}\n' - # # TODO: use `._frame_stack` scanner to find the @api_frame - # ) - # with trio.CancelScope(shield=shield): - # await DebugStatus.repl_release.wait() - # await trio.sleep(0.1) - - # must shield here to avoid hitting a `Cancelled` and - # a child getting stuck bc we clobbered the tty - with trio.CancelScope(shield=shield): - ctx_line = '`Lock` in this root actor task' - acq_prefix: str = 'shield-' if shield else '' - if ( - Lock._debug_lock.locked() - ): - if ctx: - ctx_line: str = ( - 'active `Lock` owned by ctx\n\n' - f'{ctx}' - ) - elif Lock._owned_by_root: - ctx_line: str = ( - 'Already owned by root-task `Lock`\n\n' - f'repl_task: {DebugStatus.repl_task}\n' - f'repl: {DebugStatus.repl}\n' - ) - else: - ctx_line: str = ( - '**STALE `Lock`** held by unknown root/remote task ' - 'with no request ctx !?!?' - ) - - log.devx( - f'attempting to {acq_prefix}acquire ' - f'{ctx_line}' - ) - await Lock._debug_lock.acquire() - Lock._owned_by_root = True - # else: - - # if ( - # not called_from_bg_thread - # and not called_from_sync - # ): - # log.devx( - # f'attempting to {acq_prefix}acquire ' - # f'{ctx_line}' - # ) - - # XXX: since we need to enter pdb synchronously below, - # and we don't want to block the thread that starts - # stepping through the application thread, we later - # must `Lock._debug_lock.release()` manually from - # some `PdbREPL` completion callback(`.set_[continue/exit]()`). - # - # So, when `._pause()` is called from a (bg/non-trio) - # thread, special provisions are needed and we need - # to do the `.acquire()`/`.release()` calls from - # a common `trio.task` (due to internal impl of - # `FIFOLock`). Thus we do not acquire here and - # instead expect `.pause_from_sync()` to take care of - # this detail depending on the caller's (threading) - # usage. - # - # NOTE that this special case is ONLY required when - # using `.pause_from_sync()` from the root actor - # since OW a subactor will instead make an IPC - # request (in the branch below) to acquire the - # `Lock`-mutex and a common root-actor RPC task will - # take care of `._debug_lock` mgmt! - - # enter REPL from root, no TTY locking IPC ctx necessary - # since we can acquire the `Lock._debug_lock` directly in - # thread. - return _enter_repl_sync(debug_func) - - # TODO: need a more robust check for the "root" actor - elif ( - not is_root_process() - and actor._parent_chan # a connected child - ): - repl_task: Task|None = DebugStatus.repl_task - req_task: Task|None = DebugStatus.req_task - if req_task: - log.warning( - f'Already an ongoing repl request?\n' - f'|_{req_task}\n\n' - - f'REPL task is\n' - f'|_{repl_task}\n\n' - - ) - # Recurrent entry case. - # this task already has the lock and is likely - # recurrently entering a `.pause()`-point either bc, - # - someone is hacking on runtime internals and put - # one inside code that get's called on the way to - # this code, - # - a legit app task uses the 'next' command while in - # a REPL sesh, and actually enters another - # `.pause()` (in a loop or something). 
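# ------------------------------------------------------------------
# Illustrative aside: the shielded root-side lock acquisition above
# is, at its core, a shared `trio.Lock` grabbed under a shielded
# cancel scope so an in-flight cancellation can't abort the TTY
# handoff; a standalone sketch with stand-in names, not the real impl:
import trio

_tty_lock = trio.Lock()  # stand-in for `Lock._debug_lock`

async def acquire_tty_shielded() -> None:
    with trio.CancelScope(shield=True):
        await _tty_lock.acquire()

def release_tty() -> None:
    # NOTE, `trio.Lock` tracks per-task ownership so this must run in
    # the same task which acquired it (hence the runtime's special
    # handling for bg-thread callers).
    _tty_lock.release()
# ------------------------------------------------------------------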
- # - # XXX Any other cose is likely a bug. - if ( - repl_task - ): - if repl_task is task: - log.warning( - f'{task.name}@{actor.uid} already has TTY lock\n' - f'ignoring..' - ) - with trio.CancelScope(shield=shield): - await trio.lowlevel.checkpoint() - return - - else: - # if **this** actor is already in debug REPL we want - # to maintain actor-local-task mutex access, so block - # here waiting for the control to be released - this - # -> allows for recursive entries to `tractor.pause()` - log.warning( - f'{task}@{actor.uid} already has TTY lock\n' - f'waiting for release..' - ) - with trio.CancelScope(shield=shield): - await DebugStatus.repl_release.wait() - await trio.sleep(0.1) - - elif ( - req_task - ): - log.warning( - 'Local task already has active debug request\n' - f'|_{task}\n\n' - - 'Waiting for previous request to complete..\n' - ) - with trio.CancelScope(shield=shield): - await DebugStatus.req_finished.wait() - - # this **must** be awaited by the caller and is done using the - # root nursery so that the debugger can continue to run without - # being restricted by the scope of a new task nursery. - - # TODO: if we want to debug a trio.Cancelled triggered exception - # we have to figure out how to avoid having the service nursery - # cancel on this task start? I *think* this works below: - # ```python - # actor._service_n.cancel_scope.shield = shield - # ``` - # but not entirely sure if that's a sane way to implement it? - - # NOTE currently we spawn the lock request task inside this - # subactor's global `Actor._service_n` so that the - # lifetime of the lock-request can outlive the current - # `._pause()` scope while the user steps through their - # application code and when they finally exit the - # session, via 'continue' or 'quit' cmds, the `PdbREPL` - # will manually call `DebugStatus.release()` to release - # the lock session with the root actor. - # - # TODO: ideally we can add a tighter scope for this - # request task likely by conditionally opening a "debug - # nursery" inside `_errors_relayed_via_ipc()`, see the - # todo in tht module, but - # -[ ] it needs to be outside the normal crash handling - # `_maybe_enter_debugger()` block-call. - # -[ ] we probably only need to allocate the nursery when - # we detect the runtime is already in debug mode. - # - curr_ctx: Context = current_ipc_ctx() - # req_ctx: Context = await curr_ctx._debug_tn.start( - log.devx( - 'Starting request task\n' - f'|_{task}\n' - ) - with trio.CancelScope(shield=shield): - req_ctx: Context = await actor._service_n.start( - partial( - request_root_stdio_lock, - actor_uid=actor.uid, - task_uid=(task.name, id(task)), # task uuid (effectively) - shield=shield, - ) - ) - # XXX sanity, our locker task should be the one which - # entered a new IPC ctx with the root actor, NOT the one - # that exists around the task calling into `._pause()`. - assert ( - req_ctx - is - DebugStatus.req_ctx - is not - curr_ctx - ) - - # enter REPL - return _enter_repl_sync(debug_func) - - # TODO: prolly factor this plus the similar block from - # `_enter_repl_sync()` into a common @cm? - except BaseException as _pause_err: - pause_err: BaseException = _pause_err - _repl_fail_report: str|None = _repl_fail_msg - if isinstance(pause_err, bdb.BdbQuit): - log.devx( - 'REPL for pdb was explicitly quit!\n' - ) - _repl_fail_report = None - - # when the actor is mid-runtime cancellation the - # `Actor._service_n` might get closed before we can spawn - # the request task, so just ignore expected RTE. 
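# ------------------------------------------------------------------
# Illustrative aside: the lock-request task spawned on `._service_n`
# above uses `trio`'s standard `await nursery.start()` handshake; a
# standalone sketch of that pattern (all names are stand-ins):
import trio

async def request_task(
    *,
    task_status: trio.TaskStatus = trio.TASK_STATUS_IGNORED,
) -> None:
    ctx = object()  # stand-in for the opened IPC `Context`
    task_status.started(ctx)  # unblocks the `.start()` caller
    await trio.sleep_forever()  # keep servicing the request

async def main() -> None:
    async with trio.open_nursery() as tn:
        ctx = await tn.start(request_task)
        print(f'request task running, started value: {ctx}')
        tn.cancel_scope.cancel()

trio.run(main)
# ------------------------------------------------------------------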
- elif ( - isinstance(pause_err, RuntimeError) - and - actor._cancel_called - ): - # service nursery won't be usable and we - # don't want to lock up the root either way since - # we're in (the midst of) cancellation. - log.warning( - 'Service nursery likely closed due to actor-runtime cancellation..\n' - 'Ignoring failed debugger lock request task spawn..\n' - ) - return - - elif isinstance(pause_err, trio.Cancelled): - _repl_fail_report += ( - 'You called `tractor.pause()` from an already cancelled scope!\n\n' - 'Consider `await tractor.pause(shield=True)` to make it work B)\n' - ) - - else: - _repl_fail_report += f'on behalf of {repl_task} ??\n' - - if _repl_fail_report: - log.exception(_repl_fail_report) - - if not actor.is_infected_aio(): - DebugStatus.release(cancel_req_task=True) - - # sanity checks for ^ on request/status teardown - # assert DebugStatus.repl is None # XXX no more bc bg thread cases? - assert DebugStatus.repl_task is None - - # sanity, for when hackin on all this? - if not isinstance(pause_err, trio.Cancelled): - req_ctx: Context = DebugStatus.req_ctx - # if req_ctx: - # # XXX, bc the child-task in root might cancel it? - # # assert req_ctx._scope.cancel_called - # assert req_ctx.maybe_error - - raise - - finally: - # set in finally block of func.. this can be synced-to - # eventually with a debug_nursery somehow? - # assert DebugStatus.req_task is None - - # always show frame when request fails due to internal - # failure in the above code (including an `BdbQuit`). - if ( - DebugStatus.req_err - or - repl_err - or - pause_err - ): - __tracebackhide__: bool = False - - -def _set_trace( - repl: PdbREPL, # passed by `_pause()` - hide_tb: bool, - - # partial-ed in by `.pause()` - api_frame: FrameType, - - # optionally passed in to provide support for - # `pause_from_sync()` where - actor: tractor.Actor|None = None, - task: Task|Thread|None = None, -): - __tracebackhide__: bool = hide_tb - actor: tractor.Actor = actor or current_actor() - task: Task|Thread = task or current_task() - - # else: - # TODO: maybe print the actor supervion tree up to the - # root here? Bo - log.pdb( - f'{_pause_msg}\n' - f'>(\n' - f'|_{actor.uid}\n' - f' |_{task}\n' # @ {actor.uid}\n' - # f'|_{task}\n' - # ^-TODO-^ more compact pformating? - # -[ ] make an `Actor.__repr()__` - # -[ ] should we use `log.pformat_task_uid()`? - ) - # presuming the caller passed in the "api frame" - # (the last frame before user code - like `.pause()`) - # then we only step up one frame to where the user - # called our API. - caller_frame: FrameType = api_frame.f_back # type: ignore - - # pretend this frame is the caller frame to show - # the entire call-stack all the way down to here. - if not hide_tb: - caller_frame: FrameType = inspect.currentframe() - - # engage ze REPL - # B~() - repl.set_trace(frame=caller_frame) - - -# XXX TODO! XXX, ensure `pytest -s` doesn't just -# hang on this being called in a test.. XD -# -[ ] maybe something in our test suite or is there -# some way we can detect output capture is enabled -# from the process itself? -# |_ronny: ? -# -async def pause( - *, - hide_tb: bool = True, - api_frame: FrameType|None = None, - - # TODO: figure out how to still make this work: - # -[ ] pass it direct to `_pause()`? 
- # -[ ] use it to set the `debug_nursery.cancel_scope.shield` - shield: bool = False, - **_pause_kwargs, - -) -> None: - ''' - A pause point (more commonly known as a "breakpoint") interrupt - instruction for engaging a blocking debugger instance to - conduct manual console-based-REPL-interaction from within - `tractor`'s async runtime, normally from some single-threaded - and currently executing actor-hosted-`trio`-task in some - (remote) process. - - NOTE: we use the semantics "pause" since it better encompasses - the entirety of the necessary global-runtime-state-mutation any - actor-task must access and lock in order to get full isolated - control over the process tree's root TTY: - https://en.wikipedia.org/wiki/Breakpoint - - ''' - __tracebackhide__: bool = hide_tb - - # always start 1 level up from THIS in user code since normally - # `tractor.pause()` is called explicitly by use-app code thus - # making it the highest up @api_frame. - api_frame: FrameType = api_frame or inspect.currentframe() - - # XXX TODO: this was causing cs-stack corruption in trio due to - # usage within the `Context._scope_nursery` (which won't work - # based on scoping of it versus call to `_maybe_enter_debugger()` - # from `._rpc._invoke()`) - # with trio.CancelScope( - # shield=shield, - # ) as cs: - # NOTE: so the caller can always manually cancel even - # if shielded! - # task_status.started(cs) - # log.critical( - # '`.pause() cancel-scope is:\n\n' - # f'{pformat_cs(cs, var_name="pause_cs")}\n\n' - # ) - await _pause( - debug_func=partial( - _set_trace, - api_frame=api_frame, - ), - shield=shield, - **_pause_kwargs - ) - # XXX avoid cs stack corruption when `PdbREPL.interaction()` - # raises `BdbQuit`. - # await DebugStatus.req_finished.wait() - - -_gb_mod: None|ModuleType|False = None - - -def maybe_import_greenback( - raise_not_found: bool = True, - force_reload: bool = False, - -) -> ModuleType|False: - # be cached-fast on module-already-inited - global _gb_mod - - if _gb_mod is False: - return False - - elif ( - _gb_mod is not None - and not force_reload - ): - return _gb_mod - - try: - import greenback - _gb_mod = greenback - return greenback - - except ModuleNotFoundError as mnf: - log.debug( - '`greenback` is not installed.\n' - 'No sync debug support!\n' - ) - _gb_mod = False - - if raise_not_found: - raise RuntimeError( - 'The `greenback` lib is required to use `tractor.pause_from_sync()`!\n' - 'https://github.com/oremanj/greenback\n' - ) from mnf - - return False - - -async def maybe_init_greenback(**kwargs) -> None|ModuleType: - try: - if mod := maybe_import_greenback(**kwargs): - await mod.ensure_portal() - log.devx( - '`greenback` portal opened!\n' - 'Sync debug support activated!\n' - ) - return mod - except BaseException: - log.exception('Failed to init `greenback`..') - raise - - return None - - -async def _pause_from_bg_root_thread( - behalf_of_thread: Thread, - repl: PdbREPL, - hide_tb: bool, - task_status: TaskStatus[Task] = trio.TASK_STATUS_IGNORED, - **_pause_kwargs, -): - ''' - Acquire the `Lock._debug_lock` from a bg (only need for - root-actor) non-`trio` thread (started via a call to - `.to_thread.run_sync()` in some actor) by scheduling this func in - the actor's service (TODO eventually a special debug_mode) - nursery. 
This task acquires the lock then `.started()`s the - `DebugStatus.repl_release: trio.Event` waits for the `PdbREPL` to - set it, then terminates very much the same way as - `request_root_stdio_lock()` uses an IPC `Context` from a subactor - to do the same from a remote process. - - This task is normally only required to be scheduled for the - special cases of a bg sync thread running in the root actor; see - the only usage inside `.pause_from_sync()`. - - ''' - global Lock - # TODO: unify this copied code with where it was - # from in `maybe_wait_for_debugger()` - # if ( - # Lock.req_handler_finished is not None - # and not Lock.req_handler_finished.is_set() - # and (in_debug := Lock.ctx_in_debug) - # ): - # log.devx( - # '\nRoot is waiting on tty lock to release from\n\n' - # # f'{caller_frame_info}\n' - # ) - # with trio.CancelScope(shield=True): - # await Lock.req_handler_finished.wait() - - # log.pdb( - # f'Subactor released debug lock\n' - # f'|_{in_debug}\n' - # ) - task: Task = current_task() - - # Manually acquire since otherwise on release we'll - # get a RTE raised by `trio` due to ownership.. - log.devx( - 'Trying to acquire `Lock` on behalf of bg thread\n' - f'|_{behalf_of_thread}\n' - ) - - # NOTE: this is already a task inside the main-`trio`-thread, so - # we don't need to worry about calling it another time from the - # bg thread on which who's behalf this task is operating. - DebugStatus.shield_sigint() - - out = await _pause( - debug_func=None, - repl=repl, - hide_tb=hide_tb, - called_from_sync=True, - called_from_bg_thread=True, - **_pause_kwargs - ) - DebugStatus.repl_task = behalf_of_thread - - lock: trio.FIFOLock = Lock._debug_lock - stats: trio.LockStatistics= lock.statistics() - assert stats.owner is task - assert Lock._owned_by_root - assert DebugStatus.repl_release - - # TODO: do we actually need this? - # originally i was trying to solve wy this was - # unblocking too soon in a thread but it was actually - # that we weren't setting our own `repl_release` below.. - while stats.owner is not task: - log.devx( - 'Trying to acquire `._debug_lock` from {stats.owner} for\n' - f'|_{behalf_of_thread}\n' - ) - await lock.acquire() - break - - # XXX NOTE XXX super important dawg.. - # set our own event since the current one might - # have already been overriden and then set when the - # last REPL mutex holder exits their sesh! - # => we do NOT want to override any existing one - # and we want to ensure we set our own ONLY AFTER we have - # acquired the `._debug_lock` - repl_release = DebugStatus.repl_release = trio.Event() - - # unblock caller thread delivering this bg task - log.devx( - 'Unblocking root-bg-thread since we acquired lock via `._pause()`\n' - f'|_{behalf_of_thread}\n' - ) - task_status.started(out) - - # wait for bg thread to exit REPL sesh. - try: - await repl_release.wait() - finally: - log.devx( - 'releasing lock from bg root thread task!\n' - f'|_ {behalf_of_thread}\n' - ) - Lock.release() - - -def pause_from_sync( - hide_tb: bool = True, - called_from_builtin: bool = False, - api_frame: FrameType|None = None, - - allow_no_runtime: bool = False, - - # proxy to `._pause()`, for ex: - # shield: bool = False, - # api_frame: FrameType|None = None, - **_pause_kwargs, - -) -> None: - ''' - Pause a `tractor` scheduled task or thread from sync (non-async - function) code. 
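# ------------------------------------------------------------------
# Illustrative aside: the bg-thread branches below rely on `trio`'s
# standard thread round-trip, i.e. a worker thread started with
# `trio.to_thread.run_sync()` calling back into the `trio` thread via
# `trio.from_thread.run()`; a standalone sketch of that mechanism:
import trio

async def _async_work() -> str:
    await trio.sleep(0.1)
    return 'ran on the trio thread'

def sync_fn_in_worker_thread() -> str:
    # blocks only this worker thread (not the event loop) until the
    # async call completes back on the trio side.
    return trio.from_thread.run(_async_work)

async def main() -> None:
    print(await trio.to_thread.run_sync(sync_fn_in_worker_thread))

trio.run(main)
# ------------------------------------------------------------------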
- - When `greenback` is installed we remap python's builtin - `breakpoint()` hook to this runtime-aware version which takes - care of all bg-thread detection and appropriate synchronization - with the root actor's `Lock` to avoid mult-thread/process REPL - clobbering Bo - - ''' - __tracebackhide__: bool = hide_tb - repl_owner: Task|Thread|None = None - try: - actor: tractor.Actor = current_actor( - err_on_no_runtime=False, - ) - if ( - not actor - and - not allow_no_runtime - ): - raise NoRuntime( - 'The actor runtime has not been opened?\n\n' - '`tractor.pause_from_sync()` is not functional without a wrapping\n' - '- `async with tractor.open_nursery()` or,\n' - '- `async with tractor.open_root_actor()`\n\n' - - 'If you are getting this from a builtin `breakpoint()` call\n' - 'it might mean the runtime was started then ' - 'stopped prematurely?\n' - ) - message: str = ( - f'{actor.uid} task called `tractor.pause_from_sync()`\n' - ) - - repl: PdbREPL = mk_pdb() - - # message += f'-> created local REPL {repl}\n' - is_trio_thread: bool = DebugStatus.is_main_trio_thread() - is_root: bool = is_root_process() - is_infected_aio: bool = actor.is_infected_aio() - thread: Thread = threading.current_thread() - - asyncio_task: asyncio.Task|None = None - if is_infected_aio: - asyncio_task = asyncio.current_task() - - # TODO: we could also check for a non-`.to_thread` context - # using `trio.from_thread.check_cancelled()` (says - # oremanj) wherein we get the following outputs: - # - # `RuntimeError`: non-`.to_thread` spawned thread - # noop: non-cancelled `.to_thread` - # `trio.Cancelled`: cancelled `.to_thread` - - # when called from a (bg) thread, run an async task in a new - # thread which will call `._pause()` manually with special - # handling for root-actor caller usage. - if ( - not is_trio_thread - and - not asyncio_task - ): - # TODO: `threading.Lock()` this so we don't get races in - # multi-thr cases where they're acquiring/releasing the - # REPL and setting request/`Lock` state, etc.. - repl_owner: Thread = thread - - # TODO: make root-actor bg thread usage work! - if is_root: - message += ( - f'-> called from a root-actor bg {thread}\n' - ) - - message += ( - '-> scheduling `._pause_from_bg_root_thread()`..\n' - ) - # XXX SUBTLE BADNESS XXX that should really change! - # don't over-write the `repl` here since when - # this behalf-of-bg_thread-task calls pause it will - # pass `debug_func=None` which will result in it - # returing a `repl==None` output and that get's also - # `.started(out)` back here! So instead just ignore - # that output and assign the `repl` created above! - bg_task, _ = trio.from_thread.run( - afn=partial( - actor._service_n.start, - partial( - _pause_from_bg_root_thread, - behalf_of_thread=thread, - repl=repl, - hide_tb=hide_tb, - **_pause_kwargs, - ), - ), - ) - DebugStatus.shield_sigint() - message += ( - f'-> `._pause_from_bg_root_thread()` started bg task {bg_task}\n' - ) - else: - message += f'-> called from a bg {thread}\n' - # NOTE: since this is a subactor, `._pause()` will - # internally issue a debug request via - # `request_root_stdio_lock()` and we don't need to - # worry about all the special considerations as with - # the root-actor per above. - bg_task, _ = trio.from_thread.run( - afn=partial( - _pause, - debug_func=None, - repl=repl, - hide_tb=hide_tb, - - # XXX to prevent `._pause()` for setting - # `DebugStatus.repl_task` to the gb task! - called_from_sync=True, - called_from_bg_thread=True, - - **_pause_kwargs - ), - ) - # ?TODO? 
XXX where do we NEED to call this in the - # subactor-bg-thread case? - DebugStatus.shield_sigint() - assert bg_task is not DebugStatus.repl_task - - # TODO: once supported, remove this AND the one - # inside `._pause()`! - # outstanding impl fixes: - # -[ ] need to make `.shield_sigint()` below work here! - # -[ ] how to handle `asyncio`'s new SIGINT-handler - # injection? - # -[ ] should `breakpoint()` work and what does it normally - # do in `asyncio` ctxs? - # if actor.is_infected_aio(): - # raise RuntimeError( - # '`tractor.pause[_from_sync]()` not yet supported ' - # 'for infected `asyncio` mode!' - # ) - elif ( - not is_trio_thread - and - is_infected_aio # as in, the special actor-runtime mode - # ^NOTE XXX, that doesn't mean the caller is necessarily - # an `asyncio.Task` just that `trio` has been embedded on - # the `asyncio` event loop! - and - asyncio_task # transitive caller is an actual `asyncio.Task` - ): - greenback: ModuleType = maybe_import_greenback() - - if greenback.has_portal(): - DebugStatus.shield_sigint() - fute: asyncio.Future = run_trio_task_in_future( - partial( - _pause, - debug_func=None, - repl=repl, - hide_tb=hide_tb, - - # XXX to prevent `._pause()` for setting - # `DebugStatus.repl_task` to the gb task! - called_from_sync=True, - called_from_bg_thread=True, - - **_pause_kwargs - ) - ) - repl_owner = asyncio_task - bg_task, _ = greenback.await_(fute) - # TODO: ASYNC version -> `.pause_from_aio()`? - # bg_task, _ = await fute - - # handle the case where an `asyncio` task has been - # spawned WITHOUT enabling a `greenback` portal.. - # => can often happen in 3rd party libs. - else: - bg_task = repl_owner - - # TODO, ostensibly we can just acquire the - # debug lock directly presuming we're the - # root actor running in infected asyncio - # mode? - # - # TODO, this would be a special case where - # a `_pause_from_root()` would come in very - # handy! - # if is_root: - # import pdbp; pdbp.set_trace() - # log.warning( - # 'Allowing `asyncio` task to acquire debug-lock in root-actor..\n' - # 'This is not fully implemented yet; there may be teardown hangs!\n\n' - # ) - # else: - - # simply unsupported, since there exists no hack (i - # can think of) to workaround this in a subactor - # which needs to lock the root's REPL ow we're sure - # to get prompt stdstreams clobbering.. - cf_repr: str = '' - if api_frame: - caller_frame: FrameType = api_frame.f_back - cf_repr: str = f'caller_frame: {caller_frame!r}\n' - - raise RuntimeError( - f"CAN'T USE `greenback._await()` without a portal !?\n\n" - f'Likely this task was NOT spawned via the `tractor.to_asyncio` API..\n' - f'{asyncio_task}\n' - f'{cf_repr}\n' - - f'Prolly the task was started out-of-band (from some lib?)\n' - f'AND one of the below was never called ??\n' - f'- greenback.ensure_portal()\n' - f'- greenback.bestow_portal()\n' - ) - - else: # we are presumably the `trio.run()` + main thread - # raises on not-found by default - greenback: ModuleType = maybe_import_greenback() - - # TODO: how to ensure this is either dynamically (if - # needed) called here (in some bg tn??) or that the - # subactor always already called it? - # greenback: ModuleType = await maybe_init_greenback() - - message += f'-> imported {greenback}\n' - - # NOTE XXX seems to need to be set BEFORE the `_pause()` - # invoke using gb below? 
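# ------------------------------------------------------------------
# Illustrative aside: the `greenback` branches above and below both
# depend on the same portal mechanism, which outside of `tractor`
# looks roughly like this standalone sketch:
import greenback
import trio

def sync_caller() -> None:
    # plain sync code re-entering the async world through the portal
    # bestowed on the calling task below.
    greenback.await_(trio.sleep(0.1))

async def main() -> None:
    await greenback.ensure_portal()  # enable `await_()` for this task
    assert greenback.has_portal()
    sync_caller()

trio.run(main)
# ------------------------------------------------------------------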
- DebugStatus.shield_sigint() - repl_owner: Task = current_task() - - message += '-> calling `greenback.await_(_pause(debug_func=None))` from sync caller..\n' - try: - out = greenback.await_( - _pause( - debug_func=None, - repl=repl, - hide_tb=hide_tb, - called_from_sync=True, - **_pause_kwargs, - ) - ) - except RuntimeError as rte: - if not _state._runtime_vars.get( - 'use_greenback', - False, - ): - raise RuntimeError( - '`greenback` was never initialized in this actor!?\n\n' - f'{_state._runtime_vars}\n' - ) from rte - - raise - - if out: - bg_task, _ = out - else: - bg_task: Task = current_task() - - # assert repl is repl - # assert bg_task is repl_owner - if bg_task is not repl_owner: - raise DebugStateError( - f'The registered bg task for this debug request is NOT its owner ??\n' - f'bg_task: {bg_task}\n' - f'repl_owner: {repl_owner}\n\n' - - f'{DebugStatus.repr()}\n' - ) - - # NOTE: normally set inside `_enter_repl_sync()` - DebugStatus.repl_task: str = repl_owner - - # TODO: ensure we aggressively make the user aware about - # entering the global `breakpoint()` built-in from sync - # code? - message += ( - f'-> successfully scheduled `._pause()` in `trio` thread on behalf of {bg_task}\n' - f'-> Entering REPL via `tractor._set_trace()` from caller {repl_owner}\n' - ) - log.devx(message) - - # NOTE set as late as possible to avoid state clobbering - # in the multi-threaded case! - DebugStatus.repl = repl - - _set_trace( - api_frame=api_frame or inspect.currentframe(), - repl=repl, - hide_tb=hide_tb, - actor=actor, - task=repl_owner, - ) - # LEGACY NOTE on next LOC's frame showing weirdness.. - # - # XXX NOTE XXX no other LOC can be here without it - # showing up in the REPL's last stack frame !?! - # -[ ] tried to use `@pdbp.hideframe` decoration but - # still doesn't work - except BaseException as err: - log.exception( - 'Failed to sync-pause from\n\n' - f'{repl_owner}\n' - ) - __tracebackhide__: bool = False - raise err - - -def _sync_pause_from_builtin( - *args, - called_from_builtin=True, - **kwargs, -) -> None: - ''' - Proxy call `.pause_from_sync()` but indicate the caller is the - `breakpoint()` built-in. - - Note: this assigned to `os.environ['PYTHONBREAKPOINT']` inside `._root` - - ''' - pause_from_sync( - *args, - called_from_builtin=True, - api_frame=inspect.currentframe(), - **kwargs, - ) - - -# NOTE prefer a new "pause" semantic since it better describes -# "pausing the actor's runtime" for this particular -# paralell task to do debugging in a REPL. -async def breakpoint( - hide_tb: bool = True, - **kwargs, -): - log.warning( - '`tractor.breakpoint()` is deprecated!\n' - 'Please use `tractor.pause()` instead!\n' - ) - __tracebackhide__: bool = hide_tb - await pause( - api_frame=inspect.currentframe(), - **kwargs, - ) - - -_crash_msg: str = ( - 'Opening a pdb REPL in crashed actor' -) - - -def _post_mortem( - repl: PdbREPL, # normally passed by `_pause()` - - # XXX all `partial`-ed in by `post_mortem()` below! - tb: TracebackType, - api_frame: FrameType, - - shield: bool = False, - hide_tb: bool = False, - -) -> None: - ''' - Enter the ``pdbpp`` port mortem entrypoint using our custom - debugger instance. - - ''' - __tracebackhide__: bool = hide_tb - try: - actor: tractor.Actor = current_actor() - actor_repr: str = str(actor.uid) - # ^TODO, instead a nice runtime-info + maddr + uid? - # -[ ] impl a `Actor.__repr()__`?? 
- # |_ : @ - # no_runtime: bool = False - - except NoRuntime: - actor_repr: str = '' - # no_runtime: bool = True - - try: - task_repr: Task = current_task() - except RuntimeError: - task_repr: str = '' - - # TODO: print the actor supervion tree up to the root - # here! Bo - log.pdb( - f'{_crash_msg}\n' - f'x>(\n' - f' |_ {task_repr} @ {actor_repr}\n' - - ) - - # NOTE only replacing this from `pdbp.xpm()` to add the - # `end=''` to the print XD - print(traceback.format_exc(), end='') - - caller_frame: FrameType = api_frame.f_back - - # NOTE: see the impl details of followings to understand usage: - # - `pdbp.post_mortem()` - # - `pdbp.xps()` - # - `bdb.interaction()` - repl.reset() - repl.interaction( - frame=caller_frame, - # frame=None, - traceback=tb, - ) - # XXX NOTE XXX: absolutely required to avoid hangs! - # Since we presume the post-mortem was enaged to a task-ending - # error, we MUST release the local REPL request so that not other - # local task nor the root remains blocked! - # if not no_runtime: - # DebugStatus.release() - DebugStatus.release() - - -async def post_mortem( - *, - tb: TracebackType|None = None, - api_frame: FrameType|None = None, - hide_tb: bool = False, - - # TODO: support shield here just like in `pause()`? - # shield: bool = False, - - **_pause_kwargs, - -) -> None: - ''' - `tractor`'s builtin async equivalient of `pdb.post_mortem()` - which can be used inside exception handlers. - - It's also used for the crash handler when `debug_mode == True` ;) - - ''' - __tracebackhide__: bool = hide_tb - - tb: TracebackType = tb or sys.exc_info()[2] - - # TODO: do upward stack scan for highest @api_frame and - # use its parent frame as the expected user-app code - # interact point. - api_frame: FrameType = api_frame or inspect.currentframe() - - await _pause( - debug_func=partial( - _post_mortem, - api_frame=api_frame, - tb=tb, - ), - hide_tb=hide_tb, - **_pause_kwargs - ) - - -async def _maybe_enter_pm( - err: BaseException, - *, - tb: TracebackType|None = None, - api_frame: FrameType|None = None, - hide_tb: bool = False, - - # only enter debugger REPL when returns `True` - debug_filter: Callable[ - [BaseException|BaseExceptionGroup], - bool, - ] = lambda err: not is_multi_cancelled(err), - -): - if ( - debug_mode() - - # NOTE: don't enter debug mode recursively after quitting pdb - # Iow, don't re-enter the repl if the `quit` command was issued - # by the user. - and not isinstance(err, bdb.BdbQuit) - - # XXX: if the error is the likely result of runtime-wide - # cancellation, we don't want to enter the debugger since - # there's races between when the parent actor has killed all - # comms and when the child tries to contact said parent to - # acquire the tty lock. - - # Really we just want to mostly avoid catching KBIs here so there - # might be a simpler check we can do? - and - debug_filter(err) - ): - api_frame: FrameType = api_frame or inspect.currentframe() - tb: TracebackType = tb or sys.exc_info()[2] - await post_mortem( - api_frame=api_frame, - tb=tb, - ) - return True - - else: - return False - - -@acm -async def acquire_debug_lock( - subactor_uid: tuple[str, str], -) -> AsyncGenerator[ - trio.CancelScope|None, - tuple, -]: - ''' - Request to acquire the TTY `Lock` in the root actor, release on - exit. - - This helper is for actor's who don't actually need to acquired - the debugger but want to wait until the lock is free in the - process-tree root such that they don't clobber an ongoing pdb - REPL session in some peer or child! 
- - ''' - if not debug_mode(): - yield None - return - - task: Task = current_task() - async with trio.open_nursery() as n: - ctx: Context = await n.start( - partial( - request_root_stdio_lock, - actor_uid=subactor_uid, - task_uid=(task.name, id(task)), - ) - ) - yield ctx - ctx.cancel() - - -async def maybe_wait_for_debugger( - poll_steps: int = 2, - poll_delay: float = 0.1, - child_in_debug: bool = False, - - header_msg: str = '', - _ll: str = 'devx', - -) -> bool: # was locked and we polled? - - if ( - not debug_mode() - and - not child_in_debug - ): - return False - - logmeth: Callable = getattr(log, _ll) - - msg: str = header_msg - if ( - is_root_process() - ): - # If we error in the root but the debugger is - # engaged we don't want to prematurely kill (and - # thus clobber access to) the local tty since it - # will make the pdb repl unusable. - # Instead try to wait for pdb to be released before - # tearing down. - ctx_in_debug: Context|None = Lock.ctx_in_debug - in_debug: tuple[str, str]|None = ( - ctx_in_debug.chan.uid - if ctx_in_debug - else None - ) - if in_debug == current_actor().uid: - log.debug( - msg - + - 'Root already owns the TTY LOCK' - ) - return True - - elif in_debug: - msg += ( - f'Debug `Lock` in use by subactor\n|\n|_{in_debug}\n' - ) - # TODO: could this make things more deterministic? - # wait to see if a sub-actor task will be - # scheduled and grab the tty lock on the next - # tick? - # XXX => but it doesn't seem to work.. - # await trio.testing.wait_all_tasks_blocked(cushion=0) - else: - logmeth( - msg - + - 'Root immediately acquired debug TTY LOCK' - ) - return False - - for istep in range(poll_steps): - if ( - Lock.req_handler_finished is not None - and not Lock.req_handler_finished.is_set() - and in_debug is not None - ): - # caller_frame_info: str = pformat_caller_frame() - logmeth( - msg - + - '\n^^ Root is waiting on tty lock release.. ^^\n' - # f'{caller_frame_info}\n' - ) - - if not any_connected_locker_child(): - Lock.get_locking_task_cs().cancel() - - with trio.CancelScope(shield=True): - await Lock.req_handler_finished.wait() - - log.devx( - f'Subactor released debug lock\n' - f'|_{in_debug}\n' - ) - break - - # is no subactor locking debugger currently? - if ( - in_debug is None - and ( - Lock.req_handler_finished is None - or Lock.req_handler_finished.is_set() - ) - ): - logmeth( - msg - + - 'Root acquired tty lock!' - ) - break - - else: - logmeth( - 'Root polling for debug:\n' - f'poll step: {istep}\n' - f'poll delya: {poll_delay}\n\n' - f'{Lock.repr()}\n' - ) - with CancelScope(shield=True): - await trio.sleep(poll_delay) - continue - - return True - - # else: - # # TODO: non-root call for #320? - # this_uid: tuple[str, str] = current_actor().uid - # async with acquire_debug_lock( - # subactor_uid=this_uid, - # ): - # pass - return False - - -class BoxedMaybeException(Struct): - ''' - Box a maybe-exception for post-crash introspection usage - from the body of a `open_crash_handler()` scope. - - ''' - value: BaseException|None = None - - -# TODO: better naming and what additionals? -# - [ ] optional runtime plugging? -# - [ ] detection for sync vs. async code? -# - [ ] specialized REPL entry when in distributed mode? -# -[x] hide tb by def -# - [x] allow ignoring kbi Bo -@cm -def open_crash_handler( - catch: set[BaseException] = { - BaseException, - }, - ignore: set[BaseException] = { - KeyboardInterrupt, - trio.Cancelled, - }, - tb_hide: bool = True, -): - ''' - Generic "post mortem" crash handler using `pdbp` REPL debugger. 
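# ------------------------------------------------------------------
# Illustrative aside: typical usage of this handler around sync,
# pre-`trio.run()` CLI code; the import path assumes the new
# `tractor.devx.debug` package layout added later in this diff.
from tractor.devx.debug import open_crash_handler

def might_crash(x: int) -> int:
    return 10 // x  # raises `ZeroDivisionError` when x == 0

def cli_entrypoint(x: int) -> None:
    # a non-ignored crash inside the block drops into a `pdbp`
    # post-mortem REPL; the error is stored on `boxed.value` and
    # re-raised once the REPL session ends.
    with open_crash_handler() as boxed:
        print(might_crash(x))
# ------------------------------------------------------------------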
- - We expose this as a CLI framework addon to both `click` and - `typer` users so they can quickly wrap cmd endpoints which get - automatically wrapped to use the runtime's `debug_mode: bool` - AND `pdbp.pm()` around any code that is PRE-runtime entry - - any sync code which runs BEFORE the main call to - `trio.run()`. - - ''' - __tracebackhide__: bool = tb_hide - - # TODO, yield a `outcome.Error`-like boxed type? - # -[~] use `outcome.Value/Error` X-> frozen! - # -[x] write our own..? - # -[ ] consider just wtv is used by `pytest.raises()`? - # - boxed_maybe_exc = BoxedMaybeException() - err: BaseException - try: - yield boxed_maybe_exc - except tuple(catch) as err: - boxed_maybe_exc.value = err - if ( - type(err) not in ignore - and - not is_multi_cancelled( - err, - ignore_nested=ignore - ) - ): - try: - # use our re-impl-ed version - _post_mortem( - repl=mk_pdb(), - tb=sys.exc_info()[2], - api_frame=inspect.currentframe().f_back, - ) - except bdb.BdbQuit: - __tracebackhide__: bool = False - raise err - - # XXX NOTE, `pdbp`'s version seems to lose the up-stack - # tb-info? - # pdbp.xpm() - - raise err - - -@cm -def maybe_open_crash_handler( - pdb: bool = False, - tb_hide: bool = True, - - **kwargs, -): - ''' - Same as `open_crash_handler()` but with bool input flag - to allow conditional handling. - - Normally this is used with CLI endpoints such that if the --pdb - flag is passed the pdb REPL is engaed on any crashes B) - ''' - __tracebackhide__: bool = tb_hide - - rtctx = nullcontext( - enter_result=BoxedMaybeException() - ) - if pdb: - rtctx = open_crash_handler(**kwargs) - - with rtctx as boxed_maybe_exc: - yield boxed_maybe_exc diff --git a/tractor/devx/_frame_stack.py b/tractor/devx/_frame_stack.py index 8e9bf46f..c99d3ecd 100644 --- a/tractor/devx/_frame_stack.py +++ b/tractor/devx/_frame_stack.py @@ -20,13 +20,18 @@ as it pertains to improving the grok-ability of our runtime! ''' from __future__ import annotations +from contextlib import ( + _GeneratorContextManager, + _AsyncGeneratorContextManager, +) from functools import partial import inspect +import textwrap from types import ( FrameType, FunctionType, MethodType, - # CodeType, + CodeType, ) from typing import ( Any, @@ -34,6 +39,9 @@ from typing import ( Type, ) +import pdbp +from tractor.log import get_logger +import trio from tractor.msg import ( pretty_struct, NamespacePath, @@ -41,6 +49,8 @@ from tractor.msg import ( import wrapt +log = get_logger(__name__) + # TODO: yeah, i don't love this and we should prolly just # write a decorator that actually keeps a stupid ref to the func # obj.. @@ -301,3 +311,70 @@ def api_frame( # error_set: set[BaseException], # ) -> TracebackType: # ... + + +def hide_runtime_frames() -> dict[FunctionType, CodeType]: + ''' + Hide call-stack frames for various std-lib and `trio`-API primitives + such that the tracebacks presented from our runtime are as minimized + as possible, particularly from inside a `PdbREPL`. + + ''' + # XXX HACKZONE XXX + # hide exit stack frames on nurseries and cancel-scopes! + # |_ so avoid seeing it when the `pdbp` REPL is first engaged from + # inside a `trio.open_nursery()` scope (with no line after it + # in before the block end??). + # + # TODO: FINALLY got this workin originally with + # `@pdbp.hideframe` around the `wrapper()` def embedded inside + # `_ki_protection_decoratior()`.. which is in the module: + # /home/goodboy/.virtualenvs/tractor311/lib/python3.11/site-packages/trio/_core/_ki.py + # + # -[ ] make an issue and patch for `trio` core? 
maybe linked + # to the long outstanding `pdb` one below? + # |_ it's funny that there's frame hiding throughout `._run.py` + # but not where it matters on the below exit funcs.. + # + # -[ ] provide a patchset for the lonstanding + # |_ https://github.com/python-trio/trio/issues/1155 + # + # -[ ] make a linked issue to ^ and propose allowing all the + # `._core._run` code to have their `__tracebackhide__` value + # configurable by a `RunVar` to allow getting scheduler frames + # if desired through configuration? + # + # -[ ] maybe dig into the core `pdb` issue why the extra frame is shown + # at all? + # + funcs: list[FunctionType] = [ + trio._core._run.NurseryManager.__aexit__, + trio._core._run.CancelScope.__exit__, + _GeneratorContextManager.__exit__, + _AsyncGeneratorContextManager.__aexit__, + _AsyncGeneratorContextManager.__aenter__, + trio.Event.wait, + ] + func_list_str: str = textwrap.indent( + "\n".join(f.__qualname__ for f in funcs), + prefix=' |_ ', + ) + log.devx( + 'Hiding the following runtime frames by default:\n' + f'{func_list_str}\n' + ) + + codes: dict[FunctionType, CodeType] = {} + for ref in funcs: + # stash a pre-modified version of each ref's code-obj + # so it can be reverted later if needed. + codes[ref] = ref.__code__ + pdbp.hideframe(ref) + # + # pdbp.hideframe(trio._core._run.NurseryManager.__aexit__) + # pdbp.hideframe(trio._core._run.CancelScope.__exit__) + # pdbp.hideframe(_GeneratorContextManager.__exit__) + # pdbp.hideframe(_AsyncGeneratorContextManager.__aexit__) + # pdbp.hideframe(_AsyncGeneratorContextManager.__aenter__) + # pdbp.hideframe(trio.Event.wait) + return codes diff --git a/tractor/devx/_stackscope.py b/tractor/devx/_stackscope.py index ccc46534..77b85ff8 100644 --- a/tractor/devx/_stackscope.py +++ b/tractor/devx/_stackscope.py @@ -49,7 +49,7 @@ from tractor import ( _state, log as logmod, ) -from tractor.devx import _debug +from tractor.devx import debug log = logmod.get_logger(__name__) @@ -82,7 +82,7 @@ def dump_task_tree() -> None: if ( current_sigint_handler is not - _debug.DebugStatus._trio_handler + debug.DebugStatus._trio_handler ): sigint_handler_report: str = ( 'The default `trio` SIGINT handler was replaced?!' @@ -237,7 +237,7 @@ def enable_stack_on_sig( try: import stackscope except ImportError: - log.warning( + log.error( '`stackscope` not installed for use in debug mode!' ) return None @@ -255,8 +255,8 @@ def enable_stack_on_sig( dump_tree_on_sig, ) log.devx( - 'Enabling trace-trees on `SIGUSR1` ' - 'since `stackscope` is installed @ \n' + f'Enabling trace-trees on `SIGUSR1` ' + f'since `stackscope` is installed @ \n' f'{stackscope!r}\n\n' f'With `SIGUSR1` handler\n' f'|_{dump_tree_on_sig}\n' diff --git a/tractor/devx/debug/__init__.py b/tractor/devx/debug/__init__.py new file mode 100644 index 00000000..faf9f2f7 --- /dev/null +++ b/tractor/devx/debug/__init__.py @@ -0,0 +1,100 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. 
+ +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +Multi-actor debugging for da peeps! + +''' +from __future__ import annotations +from tractor.log import get_logger +from ._repl import ( + PdbREPL as PdbREPL, + mk_pdb as mk_pdb, + TractorConfig as TractorConfig, +) +from ._tty_lock import ( + DebugStatus as DebugStatus, + DebugStateError as DebugStateError, +) +from ._trace import ( + Lock as Lock, + _pause_msg as _pause_msg, + _repl_fail_msg as _repl_fail_msg, + _set_trace as _set_trace, + _sync_pause_from_builtin as _sync_pause_from_builtin, + breakpoint as breakpoint, + maybe_init_greenback as maybe_init_greenback, + maybe_import_greenback as maybe_import_greenback, + pause as pause, + pause_from_sync as pause_from_sync, +) +from ._post_mortem import ( + BoxedMaybeException as BoxedMaybeException, + maybe_open_crash_handler as maybe_open_crash_handler, + open_crash_handler as open_crash_handler, + post_mortem as post_mortem, + _crash_msg as _crash_msg, + _maybe_enter_pm as _maybe_enter_pm, +) +from ._sync import ( + maybe_wait_for_debugger as maybe_wait_for_debugger, + acquire_debug_lock as acquire_debug_lock, +) +from ._sigint import ( + sigint_shield as sigint_shield, + _ctlc_ignore_header as _ctlc_ignore_header +) + +log = get_logger(__name__) + +# ---------------- +# XXX PKG TODO XXX +# ---------------- +# refine the internal impl and APIs! +# +# -[ ] rework `._pause()` and it's branch-cases for root vs. +# subactor: +# -[ ] `._pause_from_root()` + `_pause_from_subactor()`? +# -[ ] do the de-factor based on bg-thread usage in +# `.pause_from_sync()` & `_pause_from_bg_root_thread()`. +# -[ ] drop `debug_func == None` case which is confusing af.. +# -[ ] factor out `_enter_repl_sync()` into a util func for calling +# the `_set_trace()` / `_post_mortem()` APIs? +# +# -[ ] figure out if we need `acquire_debug_lock()` and/or re-implement +# it as part of the `.pause_from_sync()` rework per above? +# +# -[ ] pair the `._pause_from_subactor()` impl with a "debug nursery" +# that's dynamically allocated inside the `._rpc` task thus +# avoiding the `._service_n.start()` usage for the IPC request? +# -[ ] see the TODO inside `._rpc._errors_relayed_via_ipc()` +# +# -[ ] impl a `open_debug_request()` which encaps all +# `request_root_stdio_lock()` task scheduling deats +# + `DebugStatus` state mgmt; which should prolly be re-branded as +# a `DebugRequest` type anyway AND with suppoort for bg-thread +# (from root actor) usage? +# +# -[ ] handle the `xonsh` case for bg-root-threads in the SIGINT +# handler! +# -[ ] do we need to do the same for subactors? +# -[ ] make the failing tests finally pass XD +# +# -[ ] simplify `maybe_wait_for_debugger()` to be a root-task only +# API? +# -[ ] currently it's implemented as that so might as well make it +# formal? diff --git a/tractor/devx/debug/_post_mortem.py b/tractor/devx/debug/_post_mortem.py new file mode 100644 index 00000000..ce4931cf --- /dev/null +++ b/tractor/devx/debug/_post_mortem.py @@ -0,0 +1,410 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. 
+ +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +Post-mortem debugging APIs and surrounding machinery for both +sync and async contexts. + +Generally we maintain the same semantics a `pdb.post.mortem()` but +with actor-tree-wide sync/cooperation around any (sub)actor's use of +the root's TTY. + +''' +from __future__ import annotations +import bdb +from contextlib import ( + AbstractContextManager, + contextmanager as cm, + nullcontext, +) +from functools import ( + partial, +) +import inspect +import sys +import traceback +from typing import ( + Callable, + Sequence, + Type, + TYPE_CHECKING, +) +from types import ( + TracebackType, + FrameType, +) + +from msgspec import Struct +import trio +from tractor._exceptions import ( + NoRuntime, +) +from tractor import _state +from tractor._state import ( + current_actor, + debug_mode, +) +from tractor.log import get_logger +from tractor._exceptions import ( + is_multi_cancelled, +) +from ._trace import ( + _pause, +) +from ._tty_lock import ( + DebugStatus, +) +from ._repl import ( + PdbREPL, + mk_pdb, + TractorConfig as TractorConfig, +) + +if TYPE_CHECKING: + from trio.lowlevel import Task + from tractor._runtime import ( + Actor, + ) + +_crash_msg: str = ( + 'Opening a pdb REPL in crashed actor' +) + +log = get_logger(__package__) + + +class BoxedMaybeException(Struct): + ''' + Box a maybe-exception for post-crash introspection usage + from the body of a `open_crash_handler()` scope. + + ''' + value: BaseException|None = None + + # handler can suppress crashes dynamically + raise_on_exit: bool|Sequence[Type[BaseException]] = True + + def pformat(self) -> str: + ''' + Repr the boxed `.value` error in more-than-string + repr form. + + ''' + if not self.value: + return f'<{type(self).__name__}( .value=None )>' + + return ( + f'<{type(self.value).__name__}(\n' + f' |_.value = {self.value}\n' + f')>\n' + ) + + __repr__ = pformat + + +def _post_mortem( + repl: PdbREPL, # normally passed by `_pause()` + + # XXX all `partial`-ed in by `post_mortem()` below! + tb: TracebackType, + api_frame: FrameType, + + shield: bool = False, + hide_tb: bool = True, + + # maybe pre/post REPL entry + repl_fixture: ( + AbstractContextManager[bool] + |None + ) = None, + + boxed_maybe_exc: BoxedMaybeException|None = None, + +) -> None: + ''' + Enter the ``pdbpp`` port mortem entrypoint using our custom + debugger instance. + + ''' + __tracebackhide__: bool = hide_tb + + # maybe enter any user fixture + enter_repl: bool = DebugStatus.maybe_enter_repl_fixture( + repl=repl, + repl_fixture=repl_fixture, + boxed_maybe_exc=boxed_maybe_exc, + ) + if not enter_repl: + return + try: + actor: Actor = current_actor() + actor_repr: str = str(actor.uid) + # ^TODO, instead a nice runtime-info + maddr + uid? + # -[ ] impl a `Actor.__repr()__`?? + # |_ : @ + + except NoRuntime: + actor_repr: str = '' + + try: + task_repr: Task = trio.lowlevel.current_task() + except RuntimeError: + task_repr: str = '' + + # TODO: print the actor supervion tree up to the root + # here! Bo + log.pdb( + f'{_crash_msg}\n' + f'x>(\n' + f' |_ {task_repr} @ {actor_repr}\n' + + ) + + # XXX NOTE(s) on `pdbp.xpm()` version.. + # + # - seems to lose the up-stack tb-info? 
+ # - currently we're (only) replacing this from `pdbp.xpm()` + # to add the `end=''` to the print XD + # + print(traceback.format_exc(), end='') + caller_frame: FrameType = api_frame.f_back + + # NOTE, see the impl details of these in the lib to + # understand usage: + # - `pdbp.post_mortem()` + # - `pdbp.xps()` + # - `bdb.interaction()` + repl.reset() + repl.interaction( + frame=caller_frame, + # frame=None, + traceback=tb, + ) + + # XXX NOTE XXX: this is abs required to avoid hangs! + # + # Since we presume the post-mortem was enaged to + # a task-ending error, we MUST release the local REPL request + # so that not other local task nor the root remains blocked! + DebugStatus.release() + + +async def post_mortem( + *, + tb: TracebackType|None = None, + api_frame: FrameType|None = None, + hide_tb: bool = False, + + # TODO: support shield here just like in `pause()`? + # shield: bool = False, + + **_pause_kwargs, + +) -> None: + ''' + Our builtin async equivalient of `pdb.post_mortem()` which can be + used inside exception handlers. + + It's also used for the crash handler when `debug_mode == True` ;) + + ''' + __tracebackhide__: bool = hide_tb + + tb: TracebackType = tb or sys.exc_info()[2] + + # TODO: do upward stack scan for highest @api_frame and + # use its parent frame as the expected user-app code + # interact point. + api_frame: FrameType = api_frame or inspect.currentframe() + + # TODO, move to submod `._pausing` or ._api? _trace + await _pause( + debug_func=partial( + _post_mortem, + api_frame=api_frame, + tb=tb, + ), + hide_tb=hide_tb, + **_pause_kwargs + ) + + +async def _maybe_enter_pm( + err: BaseException, + *, + tb: TracebackType|None = None, + api_frame: FrameType|None = None, + hide_tb: bool = False, + + # only enter debugger REPL when returns `True` + debug_filter: Callable[ + [BaseException|BaseExceptionGroup], + bool, + ] = lambda err: not is_multi_cancelled(err), + **_pause_kws, +): + if ( + debug_mode() + + # NOTE: don't enter debug mode recursively after quitting pdb + # Iow, don't re-enter the repl if the `quit` command was issued + # by the user. + and not isinstance(err, bdb.BdbQuit) + + # XXX: if the error is the likely result of runtime-wide + # cancellation, we don't want to enter the debugger since + # there's races between when the parent actor has killed all + # comms and when the child tries to contact said parent to + # acquire the tty lock. + + # Really we just want to mostly avoid catching KBIs here so there + # might be a simpler check we can do? + and + debug_filter(err) + ): + api_frame: FrameType = api_frame or inspect.currentframe() + tb: TracebackType = tb or sys.exc_info()[2] + await post_mortem( + api_frame=api_frame, + tb=tb, + **_pause_kws, + ) + return True + + else: + return False + + +# TODO: better naming and what additionals? +# - [ ] optional runtime plugging? +# - [ ] detection for sync vs. async code? +# - [ ] specialized REPL entry when in distributed mode? +# -[x] hide tb by def +# - [x] allow ignoring kbi Bo +@cm +def open_crash_handler( + catch: set[BaseException] = { + BaseException, + }, + ignore: set[BaseException] = { + KeyboardInterrupt, + trio.Cancelled, + }, + hide_tb: bool = True, + + repl_fixture: ( + AbstractContextManager[bool] # pre/post REPL entry + |None + ) = None, + raise_on_exit: bool|Sequence[Type[BaseException]] = True, +): + ''' + Generic "post mortem" crash handler using `pdbp` REPL debugger. 
+ + We expose this as a CLI framework addon to both `click` and + `typer` users so they can quickly wrap cmd endpoints which get + automatically wrapped to use the runtime's `debug_mode: bool` + AND `pdbp.pm()` around any code that is PRE-runtime entry + - any sync code which runs BEFORE the main call to + `trio.run()`. + + ''' + __tracebackhide__: bool = hide_tb + + # TODO, yield a `outcome.Error`-like boxed type? + # -[~] use `outcome.Value/Error` X-> frozen! + # -[x] write our own..? + # -[ ] consider just wtv is used by `pytest.raises()`? + # + boxed_maybe_exc = BoxedMaybeException( + raise_on_exit=raise_on_exit, + ) + err: BaseException + try: + yield boxed_maybe_exc + except tuple(catch) as err: + boxed_maybe_exc.value = err + if ( + type(err) not in ignore + and + not is_multi_cancelled( + err, + ignore_nested=ignore + ) + ): + try: + # use our re-impl-ed version of `pdbp.xpm()` + _post_mortem( + repl=mk_pdb(), + tb=sys.exc_info()[2], + api_frame=inspect.currentframe().f_back, + hide_tb=hide_tb, + + repl_fixture=repl_fixture, + boxed_maybe_exc=boxed_maybe_exc, + ) + except bdb.BdbQuit: + __tracebackhide__: bool = False + raise err + + if ( + raise_on_exit is True + or ( + raise_on_exit is not False + and ( + set(raise_on_exit) + and + type(err) in raise_on_exit + ) + ) + and + boxed_maybe_exc.raise_on_exit == raise_on_exit + ): + raise err + + +@cm +def maybe_open_crash_handler( + pdb: bool|None = None, + hide_tb: bool = True, + + **kwargs, +): + ''' + Same as `open_crash_handler()` but with bool input flag + to allow conditional handling. + + Normally this is used with CLI endpoints such that if the --pdb + flag is passed the pdb REPL is engaed on any crashes B) + + ''' + __tracebackhide__: bool = hide_tb + + if pdb is None: + pdb: bool = _state.is_debug_mode() + + rtctx = nullcontext( + enter_result=BoxedMaybeException() + ) + if pdb: + rtctx = open_crash_handler( + hide_tb=hide_tb, + **kwargs, + ) + + with rtctx as boxed_maybe_exc: + yield boxed_maybe_exc diff --git a/tractor/devx/debug/_repl.py b/tractor/devx/debug/_repl.py new file mode 100644 index 00000000..1c0f03cc --- /dev/null +++ b/tractor/devx/debug/_repl.py @@ -0,0 +1,207 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +`pdpp.Pdb` extentions/customization and other delegate usage. + +''' +from functools import ( + cached_property, +) +import os + +import pdbp +from tractor._state import ( + is_root_process, +) + +from ._tty_lock import ( + Lock, + DebugStatus, +) + + +class TractorConfig(pdbp.DefaultConfig): + ''' + Custom `pdbp` config which tries to use the best tradeoff + between pretty and minimal. + + ''' + use_pygments: bool = True + sticky_by_default: bool = False + enable_hidden_frames: bool = True + + # much thanks @mdmintz for the hot tip! 
+ # fixes line spacing issue when resizing terminal B) + truncate_long_lines: bool = False + + # ------ - ------ + # our own custom config vars mostly + # for syncing with the actor tree's singleton + # TTY `Lock`. + + +class PdbREPL(pdbp.Pdb): + ''' + Add teardown hooks and local state describing any + ongoing TTY `Lock` request dialog. + + ''' + # override the pdbp config with our coolio one + # NOTE: this is only loaded when no `~/.pdbrc` exists + # so we should prolly pass it into the .__init__() instead? + # i dunno, see the `DefaultFactory` and `pdb.Pdb` impls. + DefaultConfig = TractorConfig + + status = DebugStatus + + # NOTE: see details in stdlib's `bdb.py` + # def user_exception(self, frame, exc_info): + # ''' + # Called when we stop on an exception. + # ''' + # log.warning( + # 'Exception during REPL sesh\n\n' + # f'{frame}\n\n' + # f'{exc_info}\n\n' + # ) + + # NOTE: this actually hooks but i don't see anyway to detect + # if an error was caught.. this is why currently we just always + # call `DebugStatus.release` inside `_post_mortem()`. + # def preloop(self): + # print('IN PRELOOP') + # super().preloop() + + # TODO: cleaner re-wrapping of all this? + # -[ ] figure out how to disallow recursive .set_trace() entry + # since that'll cause deadlock for us. + # -[ ] maybe a `@cm` to call `super().()`? + # -[ ] look at hooking into the `pp` hook specially with our + # own set of pretty-printers? + # * `.pretty_struct.Struct.pformat()` + # * `.pformat(MsgType.pld)` + # * `.pformat(Error.tb_str)`? + # * .. maybe more? + # + def set_continue(self): + try: + super().set_continue() + finally: + # NOTE: for subactors the stdio lock is released via the + # allocated RPC locker task, so for root we have to do it + # manually. + if ( + is_root_process() + and + Lock._debug_lock.locked() + and + DebugStatus.is_main_trio_thread() + ): + # Lock.release(raise_on_thread=False) + Lock.release() + + # XXX AFTER `Lock.release()` for root local repl usage + DebugStatus.release() + + def set_quit(self): + try: + super().set_quit() + finally: + if ( + is_root_process() + and + Lock._debug_lock.locked() + and + DebugStatus.is_main_trio_thread() + ): + # Lock.release(raise_on_thread=False) + Lock.release() + + # XXX after `Lock.release()` for root local repl usage + DebugStatus.release() + + # XXX NOTE: we only override this because apparently the stdlib pdb + # bois likes to touch the SIGINT handler as much as i like to touch + # my d$%&. + def _cmdloop(self): + self.cmdloop() + + @cached_property + def shname(self) -> str | None: + ''' + Attempt to return the login shell name with a special check for + the infamous `xonsh` since it seems to have some issues much + different from std shells when it comes to flushing the prompt? + + ''' + # SUPER HACKY and only really works if `xonsh` is not used + # before spawning further sub-shells.. + shpath = os.getenv('SHELL', None) + + if shpath: + if ( + os.getenv('XONSH_LOGIN', default=False) + or 'xonsh' in shpath + ): + return 'xonsh' + + return os.path.basename(shpath) + + return None + + +def mk_pdb() -> PdbREPL: + ''' + Deliver a new `PdbREPL`: a multi-process safe `pdbp.Pdb`-variant + using the magic of `tractor`'s SC-safe IPC. + + B) + + Our `pdb.Pdb` subtype accomplishes multi-process safe debugging + by: + + - mutexing access to the root process' std-streams (& thus parent + process TTY) via an IPC managed `Lock` singleton per + actor-process tree. 
+ + - temporarily overriding any subactor's SIGINT handler to shield + during live REPL sessions in sub-actors such that cancellation + is never (mistakenly) triggered by a ctrl-c and instead only by + explicit runtime API requests or after the + `pdb.Pdb.interaction()` call has returned. + + FURTHER, the `pdbp.Pdb` instance is configured to be `trio` + "compatible" from a SIGINT handling perspective; we mask out + the default `pdb` handler and instead apply `trio`s default + which mostly addresses all issues described in: + + - https://github.com/python-trio/trio/issues/1155 + + The instance returned from this factory should always be + preferred over the default `pdb[p].set_trace()` whenever using + a `pdb` REPL inside a `trio` based runtime. + + ''' + pdb = PdbREPL() + + # XXX: These are the important flags mentioned in + # https://github.com/python-trio/trio/issues/1155 + # which resolve the traceback spews to console. + pdb.allow_kbdint = True + pdb.nosigint = True + return pdb diff --git a/tractor/devx/debug/_sigint.py b/tractor/devx/debug/_sigint.py new file mode 100644 index 00000000..80f79e58 --- /dev/null +++ b/tractor/devx/debug/_sigint.py @@ -0,0 +1,333 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +A custom SIGINT handler which mainly shields actor (task) +cancellation during REPL interaction. + +''' +from __future__ import annotations +from typing import ( + TYPE_CHECKING, +) +import trio +from tractor.log import get_logger +from tractor._state import ( + current_actor, + is_root_process, +) +from ._repl import ( + PdbREPL, +) +from ._tty_lock import ( + any_connected_locker_child, + DebugStatus, + Lock, +) + +if TYPE_CHECKING: + from tractor.ipc import ( + Channel, + ) + from tractor._runtime import ( + Actor, + ) + +log = get_logger(__name__) + +_ctlc_ignore_header: str = ( + 'Ignoring SIGINT while debug REPL in use' +) + + +def sigint_shield( + signum: int, + frame: 'frame', # type: ignore # noqa + *args, + +) -> None: + ''' + Specialized, debugger-aware SIGINT handler. + + In childred we always ignore/shield for SIGINT to avoid + deadlocks since cancellation should always be managed by the + supervising parent actor. The root actor-proces is always + cancelled on ctrl-c. + + ''' + __tracebackhide__: bool = True + actor: Actor = current_actor() + + def do_cancel(): + # If we haven't tried to cancel the runtime then do that instead + # of raising a KBI (which may non-gracefully destroy + # a ``trio.run()``). + if not actor._cancel_called: + actor.cancel_soon() + + # If the runtime is already cancelled it likely means the user + # hit ctrl-c again because teardown didn't fully take place in + # which case we do the "hard" raising of a local KBI. 
+ else: + raise KeyboardInterrupt + + # only set in the actor actually running the REPL + repl: PdbREPL|None = DebugStatus.repl + + # TODO: maybe we should flatten out all these cases using + # a match/case? + # + # root actor branch that reports whether or not a child + # has locked debugger. + if is_root_process(): + # log.warning( + log.devx( + 'Handling SIGINT in root actor\n' + f'{Lock.repr()}' + f'{DebugStatus.repr()}\n' + ) + # try to see if the supposed (sub)actor in debug still + # has an active connection to *this* actor, and if not + # it's likely they aren't using the TTY lock / debugger + # and we should propagate SIGINT normally. + any_connected: bool = any_connected_locker_child() + + problem = ( + f'root {actor.uid} handling SIGINT\n' + f'any_connected: {any_connected}\n\n' + + f'{Lock.repr()}\n' + ) + + if ( + (ctx := Lock.ctx_in_debug) + and + (uid_in_debug := ctx.chan.uid) # "someone" is (ostensibly) using debug `Lock` + ): + name_in_debug: str = uid_in_debug[0] + assert not repl + # if not repl: # but it's NOT us, the root actor. + # sanity: since no repl ref is set, we def shouldn't + # be the lock owner! + assert name_in_debug != 'root' + + # IDEAL CASE: child has REPL as expected + if any_connected: # there are subactors we can contact + # XXX: only if there is an existing connection to the + # (sub-)actor in debug do we ignore SIGINT in this + # parent! Otherwise we may hang waiting for an actor + # which has already terminated to unlock. + # + # NOTE: don't emit this with `.pdb()` level in + # root without a higher level. + log.runtime( + _ctlc_ignore_header + + + f' by child ' + f'{uid_in_debug}\n' + ) + problem = None + + else: + problem += ( + '\n' + f'A `pdb` REPL is SUPPOSEDLY in use by child {uid_in_debug}\n' + f'BUT, no child actors are IPC contactable!?!?\n' + ) + + # IDEAL CASE: root has REPL as expected + else: + # root actor still has this SIGINT handler active without + # an actor using the `Lock` (a bug state) ?? + # => so immediately cancel any stale lock cs and revert + # the handler! + if not DebugStatus.repl: + # TODO: WHEN should we revert back to ``trio`` + # handler if this one is stale? + # -[ ] maybe after a counts work of ctl-c mashes? + # -[ ] use a state var like `stale_handler: bool`? + problem += ( + 'No subactor is using a `pdb` REPL according `Lock.ctx_in_debug`?\n' + 'BUT, the root should be using it, WHY this handler ??\n\n' + 'So either..\n' + '- some root-thread is using it but has no `.repl` set?, OR\n' + '- something else weird is going on outside the runtime!?\n' + ) + else: + # NOTE: since we emit this msg on ctl-c, we should + # also always re-print the prompt the tail block! + log.pdb( + _ctlc_ignore_header + + + f' by root actor..\n' + f'{DebugStatus.repl_task}\n' + f' |_{repl}\n' + ) + problem = None + + # XXX if one is set it means we ARE NOT operating an ideal + # case where a child subactor or us (the root) has the + # lock without any other detected problems. + if problem: + + # detect, report and maybe clear a stale lock request + # cancel scope. + lock_cs: trio.CancelScope = Lock.get_locking_task_cs() + maybe_stale_lock_cs: bool = ( + lock_cs is not None + and not lock_cs.cancel_called + ) + if maybe_stale_lock_cs: + problem += ( + '\n' + 'Stale `Lock.ctx_in_debug._scope: CancelScope` detected?\n' + f'{Lock.ctx_in_debug}\n\n' + + '-> Calling ctx._scope.cancel()!\n' + ) + lock_cs.cancel() + + # TODO: wen do we actually want/need this, see above. 
+ # DebugStatus.unshield_sigint() + log.warning(problem) + + # child actor that has locked the debugger + elif not is_root_process(): + log.debug( + f'Subactor {actor.uid} handling SIGINT\n\n' + f'{Lock.repr()}\n' + ) + + rent_chan: Channel = actor._parent_chan + if ( + rent_chan is None + or + not rent_chan.connected() + ): + log.warning( + 'This sub-actor thinks it is debugging ' + 'but it has no connection to its parent ??\n' + f'{actor.uid}\n' + 'Allowing SIGINT propagation..' + ) + DebugStatus.unshield_sigint() + + repl_task: str|None = DebugStatus.repl_task + req_task: str|None = DebugStatus.req_task + if ( + repl_task + and + repl + ): + log.pdb( + _ctlc_ignore_header + + + f' by local task\n\n' + f'{repl_task}\n' + f' |_{repl}\n' + ) + elif req_task: + log.debug( + _ctlc_ignore_header + + + f' by local request-task and either,\n' + f'- someone else is already REPL-in and has the `Lock`, or\n' + f'- some other local task already is replin?\n\n' + f'{req_task}\n' + ) + + # TODO can we remove this now? + # -[ ] does this path ever get hit any more? + else: + msg: str = ( + 'SIGINT shield handler still active BUT, \n\n' + ) + if repl_task is None: + msg += ( + '- No local task claims to be in debug?\n' + ) + + if repl is None: + msg += ( + '- No local REPL is currently active?\n' + ) + + if req_task is None: + msg += ( + '- No debug request task is active?\n' + ) + + log.warning( + msg + + + 'Reverting handler to `trio` default!\n' + ) + DebugStatus.unshield_sigint() + + # XXX ensure that the reverted-to-handler actually is + # able to rx what should have been **this** KBI ;) + do_cancel() + + # TODO: how to handle the case of an intermediary-child actor + # that **is not** marked in debug mode? See oustanding issue: + # https://github.com/goodboy/tractor/issues/320 + # elif debug_mode(): + + # maybe redraw/print last REPL output to console since + # we want to alert the user that more input is expect since + # nothing has been done dur to ignoring sigint. + if ( + DebugStatus.repl # only when current actor has a REPL engaged + ): + flush_status: str = ( + 'Flushing stdout to ensure new prompt line!\n' + ) + + # XXX: yah, mega hack, but how else do we catch this madness XD + if ( + repl.shname == 'xonsh' + ): + flush_status += ( + '-> ALSO re-flushing due to `xonsh`..\n' + ) + repl.stdout.write(repl.prompt) + + # log.warning( + log.devx( + flush_status + ) + repl.stdout.flush() + + # TODO: better console UX to match the current "mode": + # -[ ] for example if in sticky mode where if there is output + # detected as written to the tty we redraw this part underneath + # and erase the past draw of this same bit above? + # repl.sticky = True + # repl._print_if_sticky() + + # also see these links for an approach from `ptk`: + # https://github.com/goodboy/tractor/issues/130#issuecomment-663752040 + # https://github.com/prompt-toolkit/python-prompt-toolkit/blob/c2c6af8a0308f9e5d7c0e28cb8a02963fe0ce07a/prompt_toolkit/patch_stdout.py + else: + log.devx( + # log.warning( + 'Not flushing stdout since not needed?\n' + f'|_{repl}\n' + ) + + # XXX only for tracing this handler + log.devx('exiting SIGINT') diff --git a/tractor/devx/debug/_sync.py b/tractor/devx/debug/_sync.py new file mode 100644 index 00000000..cf4bb334 --- /dev/null +++ b/tractor/devx/debug/_sync.py @@ -0,0 +1,220 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. 
+ +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +Debugger synchronization APIs to ensure orderly access and +non-TTY-clobbering graceful teardown. + + +''' +from __future__ import annotations +from contextlib import ( + asynccontextmanager as acm, +) +from functools import ( + partial, +) +from typing import ( + AsyncGenerator, + Callable, +) + +from tractor.log import get_logger +import trio +from trio.lowlevel import ( + current_task, + Task, +) +from tractor._context import Context +from tractor._state import ( + current_actor, + debug_mode, + is_root_process, +) +from ._repl import ( + TractorConfig as TractorConfig, +) +from ._tty_lock import ( + Lock, + request_root_stdio_lock, + any_connected_locker_child, +) +from ._sigint import ( + sigint_shield as sigint_shield, + _ctlc_ignore_header as _ctlc_ignore_header +) + +log = get_logger(__package__) + + +async def maybe_wait_for_debugger( + poll_steps: int = 2, + poll_delay: float = 0.1, + child_in_debug: bool = False, + + header_msg: str = '', + _ll: str = 'devx', + +) -> bool: # was locked and we polled? + + if ( + not debug_mode() + and + not child_in_debug + ): + return False + + logmeth: Callable = getattr(log, _ll) + + msg: str = header_msg + if ( + is_root_process() + ): + # If we error in the root but the debugger is + # engaged we don't want to prematurely kill (and + # thus clobber access to) the local tty since it + # will make the pdb repl unusable. + # Instead try to wait for pdb to be released before + # tearing down. + ctx_in_debug: Context|None = Lock.ctx_in_debug + in_debug: tuple[str, str]|None = ( + ctx_in_debug.chan.uid + if ctx_in_debug + else None + ) + if in_debug == current_actor().uid: + log.debug( + msg + + + 'Root already owns the TTY LOCK' + ) + return True + + elif in_debug: + msg += ( + f'Debug `Lock` in use by subactor\n|\n|_{in_debug}\n' + ) + # TODO: could this make things more deterministic? + # wait to see if a sub-actor task will be + # scheduled and grab the tty lock on the next + # tick? + # XXX => but it doesn't seem to work.. + # await trio.testing.wait_all_tasks_blocked(cushion=0) + else: + logmeth( + msg + + + 'Root immediately acquired debug TTY LOCK' + ) + return False + + for istep in range(poll_steps): + if ( + Lock.req_handler_finished is not None + and not Lock.req_handler_finished.is_set() + and in_debug is not None + ): + # caller_frame_info: str = pformat_caller_frame() + logmeth( + msg + + + '\n^^ Root is waiting on tty lock release.. ^^\n' + # f'{caller_frame_info}\n' + ) + + if not any_connected_locker_child(): + Lock.get_locking_task_cs().cancel() + + with trio.CancelScope(shield=True): + await Lock.req_handler_finished.wait() + + log.devx( + f'Subactor released debug lock\n' + f'|_{in_debug}\n' + ) + break + + # is no subactor locking debugger currently? 
+ if ( + in_debug is None + and ( + Lock.req_handler_finished is None + or Lock.req_handler_finished.is_set() + ) + ): + logmeth( + msg + + + 'Root acquired tty lock!' + ) + break + + else: + logmeth( + 'Root polling for debug:\n' + f'poll step: {istep}\n' + f'poll delya: {poll_delay}\n\n' + f'{Lock.repr()}\n' + ) + with trio.CancelScope(shield=True): + await trio.sleep(poll_delay) + continue + + return True + + # else: + # # TODO: non-root call for #320? + # this_uid: tuple[str, str] = current_actor().uid + # async with acquire_debug_lock( + # subactor_uid=this_uid, + # ): + # pass + return False + + +@acm +async def acquire_debug_lock( + subactor_uid: tuple[str, str], +) -> AsyncGenerator[ + trio.CancelScope|None, + tuple, +]: + ''' + Request to acquire the TTY `Lock` in the root actor, release on + exit. + + This helper is for actor's who don't actually need to acquired + the debugger but want to wait until the lock is free in the + process-tree root such that they don't clobber an ongoing pdb + REPL session in some peer or child! + + ''' + if not debug_mode(): + yield None + return + + task: Task = current_task() + async with trio.open_nursery() as n: + ctx: Context = await n.start( + partial( + request_root_stdio_lock, + actor_uid=subactor_uid, + task_uid=(task.name, id(task)), + ) + ) + yield ctx + ctx.cancel() diff --git a/tractor/devx/debug/_trace.py b/tractor/devx/debug/_trace.py new file mode 100644 index 00000000..70d39325 --- /dev/null +++ b/tractor/devx/debug/_trace.py @@ -0,0 +1,1254 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +Debugger/tracing public API. + +Essentially providing the same +`pdb(p).set_trace()`/`breakpoint()`-style REPL UX but with seemless +mult-process support within a single actor tree. 
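+
+A minimal usage sketch (assuming the root actor is opened with
+`debug_mode=True`; the `main()` here is purely illustrative):
+
+    import trio
+    import tractor
+
+    async def main():
+        async with tractor.open_root_actor(debug_mode=True):
+            # drop into the actor-tree-aware REPL; feels like
+            # a plain `breakpoint()` but the TTY is lock-coordinated
+            # across the whole process tree.
+            await tractor.pause()
+
+    trio.run(main)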
+ +''' +from __future__ import annotations +import asyncio +import bdb +from contextlib import ( + AbstractContextManager, +) +from functools import ( + partial, +) +import inspect +import threading +from typing import ( + Callable, + TYPE_CHECKING, +) +from types import ( + FrameType, + ModuleType, +) + +import trio +from trio.lowlevel import ( + current_task, + Task, +) +from trio import ( + TaskStatus, +) +import tractor +from tractor.log import get_logger +from tractor.to_asyncio import run_trio_task_in_future +from tractor._context import Context +from tractor import _state +from tractor._exceptions import ( + NoRuntime, +) +from tractor._state import ( + current_actor, + current_ipc_ctx, + is_root_process, +) +from ._repl import ( + PdbREPL, + mk_pdb, + TractorConfig as TractorConfig, +) +from ._tty_lock import ( + DebugStatus, + DebugStateError, + Lock, + request_root_stdio_lock, +) +from ._sigint import ( + sigint_shield as sigint_shield, + _ctlc_ignore_header as _ctlc_ignore_header +) + +if TYPE_CHECKING: + from trio.lowlevel import Task + from threading import Thread + from tractor._runtime import ( + Actor, + ) + # from ._post_mortem import BoxedMaybeException + from ._repl import PdbREPL + +log = get_logger(__package__) + +_pause_msg: str = 'Opening a pdb REPL in paused actor' +_repl_fail_msg: str|None = ( + 'Failed to REPl via `_pause()` ' +) + +async def _pause( + + debug_func: Callable|partial|None, + + # NOTE: must be passed in the `.pause_from_sync()` case! + repl: PdbREPL|None = None, + + # TODO: allow caller to pause despite task cancellation, + # exactly the same as wrapping with: + # with CancelScope(shield=True): + # await pause() + # => the REMAINING ISSUE is that the scope's .__exit__() frame + # is always show in the debugger on entry.. and there seems to + # be no way to override it?.. + # + shield: bool = False, + hide_tb: bool = True, + called_from_sync: bool = False, + called_from_bg_thread: bool = False, + task_status: TaskStatus[ + tuple[Task, PdbREPL], + trio.Event + ] = trio.TASK_STATUS_IGNORED, + + # maybe pre/post REPL entry + repl_fixture: ( + AbstractContextManager[bool] + |None + ) = None, + + **debug_func_kwargs, + +) -> tuple[Task, PdbREPL]|None: + ''' + Inner impl for `pause()` to avoid the `trio.CancelScope.__exit__()` + stack frame when not shielded (since apparently i can't figure out + how to hide it using the normal mechanisms..) + + Hopefully we won't need this in the long run. + + ''' + __tracebackhide__: bool = hide_tb + pause_err: BaseException|None = None + actor: Actor = current_actor() + try: + task: Task = current_task() + except RuntimeError as rte: + # NOTE, 2 cases we might get here: + # + # - ACTUALLY not a `trio.lowlevel.Task` nor runtime caller, + # |_ error out as normal + # + # - an infected `asycio` actor calls it from an actual + # `asyncio.Task` + # |_ in this case we DO NOT want to RTE! + __tracebackhide__: bool = False + if actor.is_infected_aio(): + log.exception( + 'Failed to get current `trio`-task?' + ) + raise RuntimeError( + 'An `asyncio` task should not be calling this!?' + ) from rte + else: + task = asyncio.current_task() + + if debug_func is not None: + debug_func = partial(debug_func) + + # XXX NOTE XXX set it here to avoid ctl-c from cancelling a debug + # request from a subactor BEFORE the REPL is entered by that + # process. + if ( + not repl + and + debug_func + ): + repl: PdbREPL = mk_pdb() + DebugStatus.shield_sigint() + + # TODO: move this into a `open_debug_request()` @acm? 
+ # -[ ] prolly makes the most sense to do the request + # task spawn as part of an `@acm` api which delivers the + # `DebugRequest` instance and ensures encapsing all the + # pld-spec and debug-nursery? + # -[ ] maybe make this a `PdbREPL` method or mod func? + # -[ ] factor out better, main reason for it is common logic for + # both root and sub repl entry + def _enter_repl_sync( + debug_func: partial[None], + ) -> None: + __tracebackhide__: bool = hide_tb + + # maybe enter any user fixture + enter_repl: bool = DebugStatus.maybe_enter_repl_fixture( + repl=repl, + repl_fixture=repl_fixture, + ) + if not enter_repl: + return + + debug_func_name: str = ( + debug_func.func.__name__ if debug_func else 'None' + ) + + # TODO: do we want to support using this **just** for the + # locking / common code (prolly to help address #320)? + task_status.started((task, repl)) + try: + if debug_func: + # block here one (at the appropriate frame *up*) where + # ``breakpoint()`` was awaited and begin handling stdio. + log.devx( + 'Entering sync world of the `pdb` REPL for task..\n' + f'{repl}\n' + f' |_{task}\n' + ) + + # set local task on process-global state to avoid + # recurrent entries/requests from the same + # actor-local task. + DebugStatus.repl_task = task + if repl: + DebugStatus.repl = repl + else: + log.error( + 'No REPl instance set before entering `debug_func`?\n' + f'{debug_func}\n' + ) + + # invoke the low-level REPL activation routine which itself + # should call into a `Pdb.set_trace()` of some sort. + debug_func( + repl=repl, + hide_tb=hide_tb, + **debug_func_kwargs, + ) + + # TODO: maybe invert this logic and instead + # do `assert debug_func is None` when + # `called_from_sync`? + else: + if ( + called_from_sync + and + not DebugStatus.is_main_trio_thread() + ): + assert called_from_bg_thread + assert DebugStatus.repl_task is not task + + return (task, repl) + + except trio.Cancelled: + log.exception( + 'Cancelled during invoke of internal\n\n' + f'`debug_func = {debug_func_name}`\n' + ) + # XXX NOTE: DON'T release lock yet + raise + + except BaseException: + __tracebackhide__: bool = False + log.exception( + 'Failed to invoke internal\n\n' + f'`debug_func = {debug_func_name}`\n' + ) + # NOTE: OW this is ONLY called from the + # `.set_continue/next` hooks! + DebugStatus.release(cancel_req_task=True) + + raise + + log.debug( + 'Entering `._pause()` for requesting task\n' + f'|_{task}\n' + ) + + # TODO: this should be created as part of `DebugRequest()` init + # which should instead be a one-shot-use singleton much like + # the `PdbREPL`. + repl_task: Thread|Task|None = DebugStatus.repl_task + if ( + not DebugStatus.repl_release + or + DebugStatus.repl_release.is_set() + ): + log.debug( + 'Setting new `DebugStatus.repl_release: trio.Event` for requesting task\n' + f'|_{task}\n' + ) + DebugStatus.repl_release = trio.Event() + else: + log.devx( + 'Already an existing actor-local REPL user task\n' + f'|_{repl_task}\n' + ) + + # ^-NOTE-^ this must be created BEFORE scheduling any subactor + # debug-req task since it needs to wait on it just after + # `.started()`-ing back its wrapping `.req_cs: CancelScope`. + + repl_err: BaseException|None = None + try: + if is_root_process(): + # we also wait in the root-parent for any child that + # may have the tty locked prior + # TODO: wait, what about multiple root tasks (with bg + # threads) acquiring it though? 
+ ctx: Context|None = Lock.ctx_in_debug + repl_task: Task|None = DebugStatus.repl_task + if ( + ctx is None + and + repl_task is task + # and + # DebugStatus.repl + # ^-NOTE-^ matches for multi-threaded case as well? + ): + # re-entrant root process already has it: noop. + log.warning( + f'This root actor task is already within an active REPL session\n' + f'Ignoring this recurrent`tractor.pause()` entry\n\n' + f'|_{task}\n' + # TODO: use `._frame_stack` scanner to find the @api_frame + ) + with trio.CancelScope(shield=shield): + await trio.lowlevel.checkpoint() + return (repl, task) + + # elif repl_task: + # log.warning( + # f'This root actor has another task already in REPL\n' + # f'Waitin for the other task to complete..\n\n' + # f'|_{task}\n' + # # TODO: use `._frame_stack` scanner to find the @api_frame + # ) + # with trio.CancelScope(shield=shield): + # await DebugStatus.repl_release.wait() + # await trio.sleep(0.1) + + # must shield here to avoid hitting a `Cancelled` and + # a child getting stuck bc we clobbered the tty + with trio.CancelScope(shield=shield): + ctx_line = '`Lock` in this root actor task' + acq_prefix: str = 'shield-' if shield else '' + if ( + Lock._debug_lock.locked() + ): + if ctx: + ctx_line: str = ( + 'active `Lock` owned by ctx\n\n' + f'{ctx}' + ) + elif Lock._owned_by_root: + ctx_line: str = ( + 'Already owned by root-task `Lock`\n\n' + f'repl_task: {DebugStatus.repl_task}\n' + f'repl: {DebugStatus.repl}\n' + ) + else: + ctx_line: str = ( + '**STALE `Lock`** held by unknown root/remote task ' + 'with no request ctx !?!?' + ) + + log.debug( + f'attempting to {acq_prefix}acquire ' + f'{ctx_line}' + ) + await Lock._debug_lock.acquire() + Lock._owned_by_root = True + # else: + + # if ( + # not called_from_bg_thread + # and not called_from_sync + # ): + # log.devx( + # f'attempting to {acq_prefix}acquire ' + # f'{ctx_line}' + # ) + + # XXX: since we need to enter pdb synchronously below, + # and we don't want to block the thread that starts + # stepping through the application thread, we later + # must `Lock._debug_lock.release()` manually from + # some `PdbREPL` completion callback(`.set_[continue/exit]()`). + # + # So, when `._pause()` is called from a (bg/non-trio) + # thread, special provisions are needed and we need + # to do the `.acquire()`/`.release()` calls from + # a common `trio.task` (due to internal impl of + # `FIFOLock`). Thus we do not acquire here and + # instead expect `.pause_from_sync()` to take care of + # this detail depending on the caller's (threading) + # usage. + # + # NOTE that this special case is ONLY required when + # using `.pause_from_sync()` from the root actor + # since OW a subactor will instead make an IPC + # request (in the branch below) to acquire the + # `Lock`-mutex and a common root-actor RPC task will + # take care of `._debug_lock` mgmt! + + # enter REPL from root, no TTY locking IPC ctx necessary + # since we can acquire the `Lock._debug_lock` directly in + # thread. + return _enter_repl_sync(debug_func) + + # TODO: need a more robust check for the "root" actor + elif ( + not is_root_process() + and actor._parent_chan # a connected child + ): + repl_task: Task|None = DebugStatus.repl_task + req_task: Task|None = DebugStatus.req_task + if req_task: + log.warning( + f'Already an ongoing repl request?\n' + f'|_{req_task}\n\n' + + f'REPL task is\n' + f'|_{repl_task}\n\n' + + ) + # Recurrent entry case. 
+ # this task already has the lock and is likely + # recurrently entering a `.pause()`-point either bc, + # - someone is hacking on runtime internals and put + # one inside code that get's called on the way to + # this code, + # - a legit app task uses the 'next' command while in + # a REPL sesh, and actually enters another + # `.pause()` (in a loop or something). + # + # XXX Any other cose is likely a bug. + if ( + repl_task + ): + if repl_task is task: + log.warning( + f'{task.name}@{actor.uid} already has TTY lock\n' + f'ignoring..' + ) + with trio.CancelScope(shield=shield): + await trio.lowlevel.checkpoint() + return + + else: + # if **this** actor is already in debug REPL we want + # to maintain actor-local-task mutex access, so block + # here waiting for the control to be released - this + # -> allows for recursive entries to `tractor.pause()` + log.warning( + f'{task}@{actor.uid} already has TTY lock\n' + f'waiting for release..' + ) + with trio.CancelScope(shield=shield): + await DebugStatus.repl_release.wait() + await trio.sleep(0.1) + + elif ( + req_task + ): + log.warning( + 'Local task already has active debug request\n' + f'|_{task}\n\n' + + 'Waiting for previous request to complete..\n' + ) + with trio.CancelScope(shield=shield): + await DebugStatus.req_finished.wait() + + # this **must** be awaited by the caller and is done using the + # root nursery so that the debugger can continue to run without + # being restricted by the scope of a new task nursery. + + # TODO: if we want to debug a trio.Cancelled triggered exception + # we have to figure out how to avoid having the service nursery + # cancel on this task start? I *think* this works below: + # ```python + # actor._service_n.cancel_scope.shield = shield + # ``` + # but not entirely sure if that's a sane way to implement it? + + # NOTE currently we spawn the lock request task inside this + # subactor's global `Actor._service_n` so that the + # lifetime of the lock-request can outlive the current + # `._pause()` scope while the user steps through their + # application code and when they finally exit the + # session, via 'continue' or 'quit' cmds, the `PdbREPL` + # will manually call `DebugStatus.release()` to release + # the lock session with the root actor. + # + # TODO: ideally we can add a tighter scope for this + # request task likely by conditionally opening a "debug + # nursery" inside `_errors_relayed_via_ipc()`, see the + # todo in tht module, but + # -[ ] it needs to be outside the normal crash handling + # `_maybe_enter_debugger()` block-call. + # -[ ] we probably only need to allocate the nursery when + # we detect the runtime is already in debug mode. + # + curr_ctx: Context = current_ipc_ctx() + # req_ctx: Context = await curr_ctx._debug_tn.start( + log.devx( + 'Starting request task\n' + f'|_{task}\n' + ) + with trio.CancelScope(shield=shield): + req_ctx: Context = await actor._service_n.start( + partial( + request_root_stdio_lock, + actor_uid=actor.uid, + task_uid=(task.name, id(task)), # task uuid (effectively) + shield=shield, + ) + ) + # XXX sanity, our locker task should be the one which + # entered a new IPC ctx with the root actor, NOT the one + # that exists around the task calling into `._pause()`. + assert ( + req_ctx + is + DebugStatus.req_ctx + is not + curr_ctx + ) + + # enter REPL + return _enter_repl_sync(debug_func) + + # TODO: prolly factor this plus the similar block from + # `_enter_repl_sync()` into a common @cm? 
+ except BaseException as _pause_err: + pause_err: BaseException = _pause_err + _repl_fail_report: str|None = _repl_fail_msg + if isinstance(pause_err, bdb.BdbQuit): + log.devx( + 'REPL for pdb was explicitly quit!\n' + ) + _repl_fail_report = None + + # when the actor is mid-runtime cancellation the + # `Actor._service_n` might get closed before we can spawn + # the request task, so just ignore expected RTE. + elif ( + isinstance(pause_err, RuntimeError) + and + actor._cancel_called + ): + # service nursery won't be usable and we + # don't want to lock up the root either way since + # we're in (the midst of) cancellation. + log.warning( + 'Service nursery likely closed due to actor-runtime cancellation..\n' + 'Ignoring failed debugger lock request task spawn..\n' + ) + return + + elif isinstance(pause_err, trio.Cancelled): + _repl_fail_report += ( + 'You called `tractor.pause()` from an already cancelled scope!\n\n' + 'Consider `await tractor.pause(shield=True)` to make it work B)\n' + ) + + else: + _repl_fail_report += f'on behalf of {repl_task} ??\n' + + if _repl_fail_report: + log.exception(_repl_fail_report) + + if not actor.is_infected_aio(): + DebugStatus.release(cancel_req_task=True) + + # sanity checks for ^ on request/status teardown + # assert DebugStatus.repl is None # XXX no more bc bg thread cases? + assert DebugStatus.repl_task is None + + # sanity, for when hackin on all this? + if not isinstance(pause_err, trio.Cancelled): + req_ctx: Context = DebugStatus.req_ctx + # if req_ctx: + # # XXX, bc the child-task in root might cancel it? + # # assert req_ctx._scope.cancel_called + # assert req_ctx.maybe_error + + raise + + finally: + # set in finally block of func.. this can be synced-to + # eventually with a debug_nursery somehow? + # assert DebugStatus.req_task is None + + # always show frame when request fails due to internal + # failure in the above code (including an `BdbQuit`). + if ( + DebugStatus.req_err + or + repl_err + or + pause_err + ): + __tracebackhide__: bool = False + + +def _set_trace( + repl: PdbREPL, # passed by `_pause()` + hide_tb: bool, + + # partial-ed in by `.pause()` + api_frame: FrameType, + + # optionally passed in to provide support for + # `pause_from_sync()` where + actor: tractor.Actor|None = None, + task: Task|Thread|None = None, +): + __tracebackhide__: bool = hide_tb + actor: tractor.Actor = actor or current_actor() + task: Task|Thread = task or current_task() + + # else: + # TODO: maybe print the actor supervion tree up to the + # root here? Bo + log.pdb( + f'{_pause_msg}\n' + f'>(\n' + f'|_{actor.uid}\n' + f' |_{task}\n' # @ {actor.uid}\n' + # f'|_{task}\n' + # ^-TODO-^ more compact pformating? + # -[ ] make an `Actor.__repr()__` + # -[ ] should we use `log.pformat_task_uid()`? + ) + # presuming the caller passed in the "api frame" + # (the last frame before user code - like `.pause()`) + # then we only step up one frame to where the user + # called our API. + caller_frame: FrameType = api_frame.f_back # type: ignore + + # pretend this frame is the caller frame to show + # the entire call-stack all the way down to here. + if not hide_tb: + caller_frame: FrameType = inspect.currentframe() + + # engage ze REPL + # B~() + repl.set_trace(frame=caller_frame) + + +# XXX TODO! XXX, ensure `pytest -s` doesn't just +# hang on this being called in a test.. XD +# -[ ] maybe something in our test suite or is there +# some way we can detect output capture is enabled +# from the process itself? +# |_ronny: ? 
+# +async def pause( + *, + hide_tb: bool = True, + api_frame: FrameType|None = None, + + # TODO: figure out how to still make this work: + # -[ ] pass it direct to `_pause()`? + # -[ ] use it to set the `debug_nursery.cancel_scope.shield` + shield: bool = False, + **_pause_kwargs, + +) -> None: + ''' + A pause point (more commonly known as a "breakpoint") interrupt + instruction for engaging a blocking debugger instance to + conduct manual console-based-REPL-interaction from within + `tractor`'s async runtime, normally from some single-threaded + and currently executing actor-hosted-`trio`-task in some + (remote) process. + + NOTE: we use the semantics "pause" since it better encompasses + the entirety of the necessary global-runtime-state-mutation any + actor-task must access and lock in order to get full isolated + control over the process tree's root TTY: + https://en.wikipedia.org/wiki/Breakpoint + + ''' + __tracebackhide__: bool = hide_tb + + # always start 1 level up from THIS in user code since normally + # `tractor.pause()` is called explicitly by use-app code thus + # making it the highest up @api_frame. + api_frame: FrameType = api_frame or inspect.currentframe() + + # XXX TODO: this was causing cs-stack corruption in trio due to + # usage within the `Context._scope_nursery` (which won't work + # based on scoping of it versus call to `_maybe_enter_debugger()` + # from `._rpc._invoke()`) + # with trio.CancelScope( + # shield=shield, + # ) as cs: + # NOTE: so the caller can always manually cancel even + # if shielded! + # task_status.started(cs) + # log.critical( + # '`.pause() cancel-scope is:\n\n' + # f'{pformat_cs(cs, var_name="pause_cs")}\n\n' + # ) + await _pause( + debug_func=partial( + _set_trace, + api_frame=api_frame, + ), + shield=shield, + **_pause_kwargs + ) + # XXX avoid cs stack corruption when `PdbREPL.interaction()` + # raises `BdbQuit`. + # await DebugStatus.req_finished.wait() + + +_gb_mod: None|ModuleType|False = None + + +def maybe_import_greenback( + raise_not_found: bool = True, + force_reload: bool = False, + +) -> ModuleType|False: + # be cached-fast on module-already-inited + global _gb_mod + + if _gb_mod is False: + return False + + elif ( + _gb_mod is not None + and not force_reload + ): + return _gb_mod + + try: + import greenback + _gb_mod = greenback + return greenback + + except ModuleNotFoundError as mnf: + log.debug( + '`greenback` is not installed.\n' + 'No sync debug support!\n' + ) + _gb_mod = False + + if raise_not_found: + raise RuntimeError( + 'The `greenback` lib is required to use `tractor.pause_from_sync()`!\n' + 'https://github.com/oremanj/greenback\n' + ) from mnf + + return False + + +async def maybe_init_greenback(**kwargs) -> None|ModuleType: + try: + if mod := maybe_import_greenback(**kwargs): + await mod.ensure_portal() + log.devx( + '`greenback` portal opened!\n' + 'Sync debug support activated!\n' + ) + return mod + except BaseException: + log.exception('Failed to init `greenback`..') + raise + + return None + + +async def _pause_from_bg_root_thread( + behalf_of_thread: Thread, + repl: PdbREPL, + hide_tb: bool, + task_status: TaskStatus[Task] = trio.TASK_STATUS_IGNORED, + **_pause_kwargs, +): + ''' + Acquire the `Lock._debug_lock` from a bg (only need for + root-actor) non-`trio` thread (started via a call to + `.to_thread.run_sync()` in some actor) by scheduling this func in + the actor's service (TODO eventually a special debug_mode) + nursery. 
This task acquires the lock then `.started()`s the + `DebugStatus.repl_release: trio.Event` waits for the `PdbREPL` to + set it, then terminates very much the same way as + `request_root_stdio_lock()` uses an IPC `Context` from a subactor + to do the same from a remote process. + + This task is normally only required to be scheduled for the + special cases of a bg sync thread running in the root actor; see + the only usage inside `.pause_from_sync()`. + + ''' + global Lock + # TODO: unify this copied code with where it was + # from in `maybe_wait_for_debugger()` + # if ( + # Lock.req_handler_finished is not None + # and not Lock.req_handler_finished.is_set() + # and (in_debug := Lock.ctx_in_debug) + # ): + # log.devx( + # '\nRoot is waiting on tty lock to release from\n\n' + # # f'{caller_frame_info}\n' + # ) + # with trio.CancelScope(shield=True): + # await Lock.req_handler_finished.wait() + + # log.pdb( + # f'Subactor released debug lock\n' + # f'|_{in_debug}\n' + # ) + task: Task = current_task() + + # Manually acquire since otherwise on release we'll + # get a RTE raised by `trio` due to ownership.. + log.devx( + 'Trying to acquire `Lock` on behalf of bg thread\n' + f'|_{behalf_of_thread}\n' + ) + + # NOTE: this is already a task inside the main-`trio`-thread, so + # we don't need to worry about calling it another time from the + # bg thread on which who's behalf this task is operating. + DebugStatus.shield_sigint() + + out = await _pause( + debug_func=None, + repl=repl, + hide_tb=hide_tb, + called_from_sync=True, + called_from_bg_thread=True, + **_pause_kwargs + ) + DebugStatus.repl_task = behalf_of_thread + + lock: trio.FIFOLock = Lock._debug_lock + stats: trio.LockStatistics= lock.statistics() + assert stats.owner is task + assert Lock._owned_by_root + assert DebugStatus.repl_release + + # TODO: do we actually need this? + # originally i was trying to solve wy this was + # unblocking too soon in a thread but it was actually + # that we weren't setting our own `repl_release` below.. + while stats.owner is not task: + log.devx( + 'Trying to acquire `._debug_lock` from {stats.owner} for\n' + f'|_{behalf_of_thread}\n' + ) + await lock.acquire() + break + + # XXX NOTE XXX super important dawg.. + # set our own event since the current one might + # have already been overriden and then set when the + # last REPL mutex holder exits their sesh! + # => we do NOT want to override any existing one + # and we want to ensure we set our own ONLY AFTER we have + # acquired the `._debug_lock` + repl_release = DebugStatus.repl_release = trio.Event() + + # unblock caller thread delivering this bg task + log.devx( + 'Unblocking root-bg-thread since we acquired lock via `._pause()`\n' + f'|_{behalf_of_thread}\n' + ) + task_status.started(out) + + # wait for bg thread to exit REPL sesh. + try: + await repl_release.wait() + finally: + log.devx( + 'releasing lock from bg root thread task!\n' + f'|_ {behalf_of_thread}\n' + ) + Lock.release() + + +def pause_from_sync( + hide_tb: bool = True, + called_from_builtin: bool = False, + api_frame: FrameType|None = None, + + allow_no_runtime: bool = False, + + # proxy to `._pause()`, for ex: + # shield: bool = False, + # api_frame: FrameType|None = None, + **_pause_kwargs, + +) -> None: + ''' + Pause a `tractor` scheduled task or thread from sync (non-async + function) code. 
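+
+    A rough sketch of the simplest, main-`trio`-thread case; this
+    assumes `greenback` is installed and the runtime was started
+    with `debug_mode=True` (the `deep_sync_code()` name is
+    illustrative only):
+
+        import trio
+        import tractor
+
+        def deep_sync_code():
+            # engage the REPL from plain sync code
+            tractor.pause_from_sync()
+
+        async def main():
+            async with tractor.open_root_actor(debug_mode=True):
+                deep_sync_code()
+
+        trio.run(main)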
+ + When `greenback` is installed we remap python's builtin + `breakpoint()` hook to this runtime-aware version which takes + care of all bg-thread detection and appropriate synchronization + with the root actor's `Lock` to avoid mult-thread/process REPL + clobbering Bo + + ''' + __tracebackhide__: bool = hide_tb + repl_owner: Task|Thread|None = None + try: + actor: tractor.Actor = current_actor( + err_on_no_runtime=False, + ) + if ( + not actor + and + not allow_no_runtime + ): + raise NoRuntime( + 'The actor runtime has not been opened?\n\n' + '`tractor.pause_from_sync()` is not functional without a wrapping\n' + '- `async with tractor.open_nursery()` or,\n' + '- `async with tractor.open_root_actor()`\n\n' + + 'If you are getting this from a builtin `breakpoint()` call\n' + 'it might mean the runtime was started then ' + 'stopped prematurely?\n' + ) + message: str = ( + f'{actor.uid} task called `tractor.pause_from_sync()`\n' + ) + + repl: PdbREPL = mk_pdb() + + # message += f'-> created local REPL {repl}\n' + is_trio_thread: bool = DebugStatus.is_main_trio_thread() + is_root: bool = is_root_process() + is_infected_aio: bool = actor.is_infected_aio() + thread: Thread = threading.current_thread() + + asyncio_task: asyncio.Task|None = None + if is_infected_aio: + asyncio_task = asyncio.current_task() + + # TODO: we could also check for a non-`.to_thread` context + # using `trio.from_thread.check_cancelled()` (says + # oremanj) wherein we get the following outputs: + # + # `RuntimeError`: non-`.to_thread` spawned thread + # noop: non-cancelled `.to_thread` + # `trio.Cancelled`: cancelled `.to_thread` + + # CASE: bg-thread spawned via `trio.to_thread` + # ----- + # when called from a (bg) thread, run an async task in a new + # thread which will call `._pause()` manually with special + # handling for root-actor caller usage. + if ( + not is_trio_thread + and + not asyncio_task + ): + # TODO: `threading.Lock()` this so we don't get races in + # multi-thr cases where they're acquiring/releasing the + # REPL and setting request/`Lock` state, etc.. + repl_owner: Thread = thread + + # TODO: make root-actor bg thread usage work! + if is_root: + message += ( + f'-> called from a root-actor bg {thread}\n' + ) + + message += ( + '-> scheduling `._pause_from_bg_root_thread()`..\n' + ) + # XXX SUBTLE BADNESS XXX that should really change! + # don't over-write the `repl` here since when + # this behalf-of-bg_thread-task calls pause it will + # pass `debug_func=None` which will result in it + # returing a `repl==None` output and that get's also + # `.started(out)` back here! So instead just ignore + # that output and assign the `repl` created above! + bg_task, _ = trio.from_thread.run( + afn=partial( + actor._service_n.start, + partial( + _pause_from_bg_root_thread, + behalf_of_thread=thread, + repl=repl, + hide_tb=hide_tb, + **_pause_kwargs, + ), + ), + ) + DebugStatus.shield_sigint() + message += ( + f'-> `._pause_from_bg_root_thread()` started bg task {bg_task}\n' + ) + else: + message += f'-> called from a bg {thread}\n' + # NOTE: since this is a subactor, `._pause()` will + # internally issue a debug request via + # `request_root_stdio_lock()` and we don't need to + # worry about all the special considerations as with + # the root-actor per above. + bg_task, _ = trio.from_thread.run( + afn=partial( + _pause, + debug_func=None, + repl=repl, + hide_tb=hide_tb, + + # XXX to prevent `._pause()` for setting + # `DebugStatus.repl_task` to the gb task! 
+ called_from_sync=True, + called_from_bg_thread=True, + + **_pause_kwargs + ), + ) + # ?TODO? XXX where do we NEED to call this in the + # subactor-bg-thread case? + DebugStatus.shield_sigint() + assert bg_task is not DebugStatus.repl_task + + # TODO: once supported, remove this AND the one + # inside `._pause()`! + # outstanding impl fixes: + # -[ ] need to make `.shield_sigint()` below work here! + # -[ ] how to handle `asyncio`'s new SIGINT-handler + # injection? + # -[ ] should `breakpoint()` work and what does it normally + # do in `asyncio` ctxs? + # if actor.is_infected_aio(): + # raise RuntimeError( + # '`tractor.pause[_from_sync]()` not yet supported ' + # 'for infected `asyncio` mode!' + # ) + # + # CASE: bg-thread running `asyncio.Task` + # ----- + elif ( + not is_trio_thread + and + is_infected_aio # as in, the special actor-runtime mode + # ^NOTE XXX, that doesn't mean the caller is necessarily + # an `asyncio.Task` just that `trio` has been embedded on + # the `asyncio` event loop! + and + asyncio_task # transitive caller is an actual `asyncio.Task` + ): + greenback: ModuleType = maybe_import_greenback() + + if greenback.has_portal(): + DebugStatus.shield_sigint() + fute: asyncio.Future = run_trio_task_in_future( + partial( + _pause, + debug_func=None, + repl=repl, + hide_tb=hide_tb, + + # XXX to prevent `._pause()` for setting + # `DebugStatus.repl_task` to the gb task! + called_from_sync=True, + called_from_bg_thread=True, + + **_pause_kwargs + ) + ) + repl_owner = asyncio_task + bg_task, _ = greenback.await_(fute) + # TODO: ASYNC version -> `.pause_from_aio()`? + # bg_task, _ = await fute + + # handle the case where an `asyncio` task has been + # spawned WITHOUT enabling a `greenback` portal.. + # => can often happen in 3rd party libs. + else: + bg_task = repl_owner + + # TODO, ostensibly we can just acquire the + # debug lock directly presuming we're the + # root actor running in infected asyncio + # mode? + # + # TODO, this would be a special case where + # a `_pause_from_root()` would come in very + # handy! + # if is_root: + # import pdbp; pdbp.set_trace() + # log.warning( + # 'Allowing `asyncio` task to acquire debug-lock in root-actor..\n' + # 'This is not fully implemented yet; there may be teardown hangs!\n\n' + # ) + # else: + + # simply unsupported, since there exists no hack (i + # can think of) to workaround this in a subactor + # which needs to lock the root's REPL ow we're sure + # to get prompt stdstreams clobbering.. + cf_repr: str = '' + if api_frame: + caller_frame: FrameType = api_frame.f_back + cf_repr: str = f'caller_frame: {caller_frame!r}\n' + + raise RuntimeError( + f"CAN'T USE `greenback._await()` without a portal !?\n\n" + f'Likely this task was NOT spawned via the `tractor.to_asyncio` API..\n' + f'{asyncio_task}\n' + f'{cf_repr}\n' + + f'Prolly the task was started out-of-band (from some lib?)\n' + f'AND one of the below was never called ??\n' + f'- greenback.ensure_portal()\n' + f'- greenback.bestow_portal()\n' + ) + + # CASE: `trio.run()` + "main thread" + # ----- + else: + # raises on not-found by default + greenback: ModuleType = maybe_import_greenback() + + # TODO: how to ensure this is either dynamically (if + # needed) called here (in some bg tn??) or that the + # subactor always already called it? + # greenback: ModuleType = await maybe_init_greenback() + + message += f'-> imported {greenback}\n' + + # NOTE XXX seems to need to be set BEFORE the `_pause()` + # invoke using gb below? 
+ DebugStatus.shield_sigint() + repl_owner: Task = current_task() + + message += '-> calling `greenback.await_(_pause(debug_func=None))` from sync caller..\n' + try: + out = greenback.await_( + _pause( + debug_func=None, + repl=repl, + hide_tb=hide_tb, + called_from_sync=True, + **_pause_kwargs, + ) + ) + except RuntimeError as rte: + if not _state._runtime_vars.get( + 'use_greenback', + False, + ): + raise RuntimeError( + '`greenback` was never initialized in this actor!?\n\n' + f'{_state._runtime_vars}\n' + ) from rte + + raise + + if out: + bg_task, _ = out + else: + bg_task: Task = current_task() + + # assert repl is repl + # assert bg_task is repl_owner + if bg_task is not repl_owner: + raise DebugStateError( + f'The registered bg task for this debug request is NOT its owner ??\n' + f'bg_task: {bg_task}\n' + f'repl_owner: {repl_owner}\n\n' + + f'{DebugStatus.repr()}\n' + ) + + # NOTE: normally set inside `_enter_repl_sync()` + DebugStatus.repl_task: str = repl_owner + + # TODO: ensure we aggressively make the user aware about + # entering the global `breakpoint()` built-in from sync + # code? + message += ( + f'-> successfully scheduled `._pause()` in `trio` thread on behalf of {bg_task}\n' + f'-> Entering REPL via `tractor._set_trace()` from caller {repl_owner}\n' + ) + log.devx(message) + + # NOTE set as late as possible to avoid state clobbering + # in the multi-threaded case! + DebugStatus.repl = repl + + _set_trace( + api_frame=api_frame or inspect.currentframe(), + repl=repl, + hide_tb=hide_tb, + actor=actor, + task=repl_owner, + ) + # LEGACY NOTE on next LOC's frame showing weirdness.. + # + # XXX NOTE XXX no other LOC can be here without it + # showing up in the REPL's last stack frame !?! + # -[ ] tried to use `@pdbp.hideframe` decoration but + # still doesn't work + except BaseException as err: + log.exception( + 'Failed to sync-pause from\n\n' + f'{repl_owner}\n' + ) + __tracebackhide__: bool = False + raise err + + +def _sync_pause_from_builtin( + *args, + called_from_builtin=True, + **kwargs, +) -> None: + ''' + Proxy call `.pause_from_sync()` but indicate the caller is the + `breakpoint()` built-in. + + Note: this always assigned to `os.environ['PYTHONBREAKPOINT']` + inside `._root.open_root_actor()` whenever `debug_mode=True` is + set. + + ''' + pause_from_sync( + *args, + called_from_builtin=True, + api_frame=inspect.currentframe(), + **kwargs, + ) + + +# NOTE prefer a new "pause" semantic since it better describes +# "pausing the actor's runtime" for this particular +# paralell task to do debugging in a REPL. +async def breakpoint( + hide_tb: bool = True, + **kwargs, +): + log.warning( + '`tractor.breakpoint()` is deprecated!\n' + 'Please use `tractor.pause()` instead!\n' + ) + __tracebackhide__: bool = hide_tb + await pause( + api_frame=inspect.currentframe(), + **kwargs, + ) diff --git a/tractor/devx/debug/_tty_lock.py b/tractor/devx/debug/_tty_lock.py new file mode 100644 index 00000000..78f5d16a --- /dev/null +++ b/tractor/devx/debug/_tty_lock.py @@ -0,0 +1,1239 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. 
+ +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +Root-actor TTY mutex-locking machinery. + +''' +from __future__ import annotations +import asyncio +from contextlib import ( + AbstractContextManager, + asynccontextmanager as acm, + ExitStack, +) +import textwrap +import threading +import signal +from typing import ( + Any, + AsyncIterator, + Callable, + TypeAlias, + TYPE_CHECKING, +) +from types import ( + FrameType, +) + +from msgspec import Struct +import pdbp +import sniffio +import trio +from trio import CancelScope +from trio.lowlevel import ( + current_task, +) +from trio import ( + TaskStatus, +) +import tractor +from tractor.to_asyncio import run_trio_task_in_future +from tractor.log import get_logger +from tractor._context import Context +from tractor import _state +from tractor._exceptions import ( + DebugRequestError, + InternalError, +) +from tractor._state import ( + current_actor, + is_root_process, +) + +if TYPE_CHECKING: + from trio.lowlevel import Task + from threading import Thread + from tractor.ipc import ( + IPCServer, + ) + from tractor._runtime import ( + Actor, + ) + from ._repl import ( + PdbREPL, + ) + from ._post_mortem import ( + BoxedMaybeException, + ) + +log = get_logger(__name__) + + +class LockStatus( + Struct, + tag=True, + tag_field='msg_type', +): + subactor_uid: tuple[str, str] + cid: str + locked: bool + + +class LockRelease( + Struct, + tag=True, + tag_field='msg_type', +): + subactor_uid: tuple[str, str] + cid: str + + +__pld_spec__: TypeAlias = LockStatus|LockRelease + + +# TODO: instantiate this only in root from factory +# so as to allow runtime errors from subactors. +class Lock: + ''' + Actor-tree-global debug lock state, exists only in a root process. + + Mostly to avoid a lot of global declarations for now XD. + + ''' + @staticmethod + def get_locking_task_cs() -> CancelScope|None: + if not is_root_process(): + raise RuntimeError( + '`Lock.locking_task_cs` is invalid in subactors!' + ) + + if ctx := Lock.ctx_in_debug: + return ctx._scope + + return None + + # TODO: once we convert to singleton-per-actor-style + # @property + # def stats(cls) -> trio.LockStatistics: + # return cls._debug_lock.statistics() + + # @property + # def owner(cls) -> Task: + # return cls._debug_lock.statistics().owner + + # ROOT ONLY + # ------ - ------- + # the root-actor-ONLY singletons for, + # - the uid of the actor who's task is using a REPL + # - a literal task-lock, + # - a shielded-cancel-scope around the acquiring task*, + # - a broadcast event to signal no-actor using a REPL in tree, + # - a filter list to block subs-by-uid from locking. + # + # * in case it needs to be manually cancelled in root due to + # a stale lock condition (eg. 
IPC failure with the locking + # child + ctx_in_debug: Context|None = None + req_handler_finished: trio.Event|None = None + + _owned_by_root: bool = False + _debug_lock: trio.StrictFIFOLock = trio.StrictFIFOLock() + _blocked: set[ + tuple[str, str] # `Actor.uid` for per actor + |str # Context.cid for per task + ] = set() + + @classmethod + def repr(cls) -> str: + lock_stats: trio.LockStatistics = cls._debug_lock.statistics() + req: trio.Event|None = cls.req_handler_finished + fields: str = ( + f'|_ ._blocked: {cls._blocked}\n' + f'|_ ._debug_lock: {cls._debug_lock}\n' + f' {lock_stats}\n\n' + + f'|_ .ctx_in_debug: {cls.ctx_in_debug}\n' + f'|_ .req_handler_finished: {req}\n' + ) + if req: + req_stats: trio.EventStatistics = req.statistics() + fields += f' {req_stats}\n' + + body: str = textwrap.indent( + fields, + prefix=' ', + ) + return ( + f'<{cls.__name__}(\n' + f'{body}' + ')>\n\n' + ) + + @classmethod + # @pdbp.hideframe + def release( + cls, + raise_on_thread: bool = True, + + ) -> bool: + ''' + Release the actor-tree global TTY stdio lock (only) from the + `trio.run()`-main-thread. + + ''' + we_released: bool = False + ctx_in_debug: Context|None = cls.ctx_in_debug + repl_task: Task|Thread|None = DebugStatus.repl_task + try: + if not DebugStatus.is_main_trio_thread(): + thread: threading.Thread = threading.current_thread() + message: str = ( + '`Lock.release()` can not be called from a non-main-`trio` thread!\n' + f'{thread}\n' + ) + if raise_on_thread: + raise RuntimeError(message) + + log.devx(message) + return False + + task: Task = current_task() + message: str = ( + 'TTY NOT RELEASED on behalf of caller\n' + f'|_{task}\n' + ) + + # sanity check that if we're the root actor + # the lock is marked as such. + # note the pre-release value may be diff the the + # post-release task. + if repl_task is task: + assert cls._owned_by_root + message: str = ( + 'TTY lock held by root-actor on behalf of local task\n' + f'|_{repl_task}\n' + ) + else: + assert DebugStatus.repl_task is not task + + lock: trio.StrictFIFOLock = cls._debug_lock + owner: Task = lock.statistics().owner + if ( + lock.locked() + and + (owner is task) + # ^-NOTE-^ if we do NOT ensure this, `trio` will + # raise a RTE when a non-owner tries to releasee the + # lock. + # + # Further we need to be extra pedantic about the + # correct task, greenback-spawned-task and/or thread + # being set to the `.repl_task` such that the above + # condition matches and we actually release the lock. + # + # This is particular of note from `.pause_from_sync()`! + ): + cls._debug_lock.release() + we_released: bool = True + if repl_task: + message: str = ( + 'TTY released on behalf of root-actor-local REPL owner\n' + f'|_{repl_task}\n' + ) + else: + message: str = ( + 'TTY released by us on behalf of remote peer?\n' + f'{ctx_in_debug}\n' + ) + + except RuntimeError as rte: + log.exception( + 'Failed to release `Lock._debug_lock: trio.FIFOLock`?\n' + ) + raise rte + + finally: + # IFF there are no more requesting tasks queued up fire, the + # "tty-unlocked" event thereby alerting any monitors of the lock that + # we are now back in the "tty unlocked" state. This is basically + # and edge triggered signal around an empty queue of sub-actor + # tasks that may have tried to acquire the lock. 
+ lock_stats: trio.LockStatistics = cls._debug_lock.statistics() + req_handler_finished: trio.Event|None = Lock.req_handler_finished + if ( + not lock_stats.owner + and + req_handler_finished is None + ): + message += ( + '-> No new task holds the TTY lock!\n\n' + f'{Lock.repr()}\n' + ) + + elif ( + req_handler_finished # new IPC ctx debug request active + and + lock.locked() # someone has the lock + ): + behalf_of_task = ( + ctx_in_debug + or + repl_task + ) + message += ( + f'A non-caller task still owns this lock on behalf of\n' + f'{behalf_of_task}\n' + f'lock owner task: {lock_stats.owner}\n' + ) + + if ( + we_released + and + ctx_in_debug + ): + cls.ctx_in_debug = None # unset + + # post-release value (should be diff then value above!) + repl_task: Task|Thread|None = DebugStatus.repl_task + if ( + cls._owned_by_root + and + we_released + ): + cls._owned_by_root = False + + if task is not repl_task: + message += ( + 'Lock released by root actor on behalf of bg thread\n' + f'|_{repl_task}\n' + ) + + if message: + log.devx(message) + + return we_released + + @classmethod + @acm + async def acquire_for_ctx( + cls, + ctx: Context, + + ) -> AsyncIterator[trio.StrictFIFOLock]: + ''' + Acquire a root-actor local FIFO lock which tracks mutex access of + the process tree's global debugger breakpoint. + + This lock avoids tty clobbering (by preventing multiple processes + reading from stdstreams) and ensures multi-actor, sequential access + to the ``pdb`` repl. + + ''' + if not is_root_process(): + raise RuntimeError('Only callable by a root actor task!') + + # subactor_uid: tuple[str, str] = ctx.chan.uid + we_acquired: bool = False + log.runtime( + f'Attempting to acquire TTY lock for sub-actor\n' + f'{ctx}' + ) + try: + pre_msg: str = ( + f'Entering lock checkpoint for sub-actor\n' + f'{ctx}' + ) + stats = cls._debug_lock.statistics() + if owner := stats.owner: + pre_msg += ( + f'\n' + f'`Lock` already held by local task?\n' + f'{owner}\n\n' + # f'On behalf of task: {cls.remote_task_in_debug!r}\n' + f'On behalf of IPC ctx\n' + f'{ctx}' + ) + log.runtime(pre_msg) + + # NOTE: if the surrounding cancel scope from the + # `lock_stdio_for_peer()` caller is cancelled, this line should + # unblock and NOT leave us in some kind of + # a "child-locked-TTY-but-child-is-uncontactable-over-IPC" + # condition. + await cls._debug_lock.acquire() + cls.ctx_in_debug = ctx + we_acquired = True + + log.runtime( + f'TTY lock acquired for sub-actor\n' + f'{ctx}' + ) + + # NOTE: critical section: this yield is unshielded! + # + # IF we received a cancel during the shielded lock entry of some + # next-in-queue requesting task, then the resumption here will + # result in that ``trio.Cancelled`` being raised to our caller + # (likely from `lock_stdio_for_peer()` below)! In + # this case the ``finally:`` below should trigger and the + # surrounding caller side context should cancel normally + # relaying back to the caller. 
+ + yield cls._debug_lock + + finally: + message :str = 'Exiting `Lock.acquire_for_ctx()` on behalf of sub-actor\n' + if we_acquired: + cls.release() + message += '-> TTY lock released by child\n' + + else: + message += '-> TTY lock never acquired by child??\n' + + log.runtime( + f'{message}\n' + f'{ctx}' + ) + + +def get_lock() -> Lock: + return Lock + + +@tractor.context( + # enable the locking msgspec + pld_spec=__pld_spec__, +) +async def lock_stdio_for_peer( + ctx: Context, + subactor_task_uid: tuple[str, int], + +) -> LockStatus|LockRelease: + ''' + Lock the TTY in the root process of an actor tree in a new + inter-actor-context-task such that the ``pdbp`` debugger console + can be mutex-allocated to the calling sub-actor for REPL control + without interference by other processes / threads. + + NOTE: this task must be invoked in the root process of the actor + tree. It is meant to be invoked as an rpc-task and should be + highly reliable at releasing the mutex complete! + + ''' + subactor_uid: tuple[str, str] = ctx.chan.uid + + # mark the tty lock as being in use so that the runtime + # can try to avoid clobbering any connection from a child + # that's currently relying on it. + we_finished = Lock.req_handler_finished = trio.Event() + lock_blocked: bool = False + try: + if ctx.cid in Lock._blocked: + raise RuntimeError( + f'Double lock request!?\n' + f'The same remote task already has an active request for TTY lock ??\n\n' + f'subactor uid: {subactor_uid}\n\n' + + 'This might be mean that the requesting task ' + 'in `request_root_stdio_lock()` may have crashed?\n' + 'Consider that an internal bug exists given the TTY ' + '`Lock`ing IPC dialog..\n' + ) + Lock._blocked.add(ctx.cid) + lock_blocked = True + root_task_name: str = current_task().name + if tuple(subactor_uid) in Lock._blocked: + log.warning( + f'Subactor is blocked from acquiring debug lock..\n' + f'subactor_uid: {subactor_uid}\n' + f'remote task: {subactor_task_uid}\n' + ) + ctx._enter_debugger_on_cancel: bool = False + message: str = ( + f'Debug lock blocked for subactor\n\n' + f'x)<= {subactor_uid}\n\n' + + f'Likely because the root actor already started shutdown and is ' + 'closing IPC connections for this child!\n\n' + 'Cancelling debug request!\n' + ) + log.cancel(message) + await ctx.cancel() + raise DebugRequestError(message) + + log.devx( + 'Subactor attempting to acquire TTY lock\n' + f'root task: {root_task_name}\n' + f'subactor_uid: {subactor_uid}\n' + f'remote task: {subactor_task_uid}\n' + ) + DebugStatus.shield_sigint() + + # NOTE: we use the IPC ctx's cancel scope directly in order to + # ensure that on any transport failure, or cancellation request + # from the child we expect + # `Context._maybe_cancel_and_set_remote_error()` to cancel this + # scope despite the shielding we apply below. 
+ debug_lock_cs: CancelScope = ctx._scope + + async with Lock.acquire_for_ctx(ctx=ctx): + debug_lock_cs.shield = True + + log.devx( + 'Subactor acquired debugger request lock!\n' + f'root task: {root_task_name}\n' + f'subactor_uid: {subactor_uid}\n' + f'remote task: {subactor_task_uid}\n\n' + + 'Sending `ctx.started(LockStatus)`..\n' + + ) + + # indicate to child that we've locked stdio + await ctx.started( + LockStatus( + subactor_uid=subactor_uid, + cid=ctx.cid, + locked=True, + ) + ) + + log.devx( + f'Actor {subactor_uid} acquired `Lock` via debugger request' + ) + + # wait for unlock pdb by child + async with ctx.open_stream() as stream: + release_msg: LockRelease = await stream.receive() + + # TODO: security around only releasing if + # these match? + log.devx( + f'TTY lock released requested\n\n' + f'{release_msg}\n' + ) + assert release_msg.cid == ctx.cid + assert release_msg.subactor_uid == tuple(subactor_uid) + + log.devx( + f'Actor {subactor_uid} released TTY lock' + ) + + return LockStatus( + subactor_uid=subactor_uid, + cid=ctx.cid, + locked=False, + ) + + except BaseException as req_err: + fail_reason: str = ( + f'on behalf of peer\n\n' + f'x)<=\n' + f' |_{subactor_task_uid!r}@{ctx.chan.uid!r}\n' + f'\n' + 'Forcing `Lock.release()` due to acquire failure!\n\n' + f'x)=>\n' + f' {ctx}' + ) + if isinstance(req_err, trio.Cancelled): + fail_reason = ( + 'Cancelled during stdio-mutex request ' + + + fail_reason + ) + else: + fail_reason = ( + 'Failed to deliver stdio-mutex request ' + + + fail_reason + ) + + log.exception(fail_reason) + Lock.release() + raise + + finally: + if lock_blocked: + Lock._blocked.remove(ctx.cid) + + # wakeup any waiters since the lock was (presumably) + # released, possibly only temporarily. + we_finished.set() + DebugStatus.unshield_sigint() + + +class DebugStateError(InternalError): + ''' + Something inconsistent or unexpected happend with a sub-actor's + debug mutex request to the root actor. + + ''' + + +# TODO: rename to ReplState or somethin? +# DebugRequest, make it a singleton instance? +class DebugStatus: + ''' + Singleton-state for debugging machinery in a subactor. + + Composes conc primitives for syncing with a root actor to + acquire the tree-global (TTY) `Lock` such that only ever one + actor's task can have the REPL active at a given time. + + Methods to shield the process' `SIGINT` handler are used + whenever a local task is an active REPL. + + ''' + # XXX local ref to the `pdbp.Pbp` instance, ONLY set in the + # actor-process that currently has activated a REPL i.e. it + # should be `None` (unset) in any other actor-process that does + # not yet have the `Lock` acquired via a root-actor debugger + # request. + repl: PdbREPL|None = None + + # any `repl_fixture` provided by user are entered and + # latered closed on `.release()` + _fixture_stack = ExitStack() + + # TODO: yet again this looks like a task outcome where we need + # to sync to the completion of one task (and get its result) + # being used everywhere for syncing.. + # -[ ] see if we can get our proto oco task-mngr to work for + # this? + repl_task: Task|None = None + # repl_thread: Thread|None = None + # ^TODO? 
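+
+    # NOTE: set (via `.release()`) once the local REPL session ends so
+    # that a stdio-lock request task blocked in
+    # `request_root_stdio_lock()` can unblock and relay a
+    # `LockRelease` to the root actor.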
+ + repl_release: trio.Event|None = None + + req_task: Task|None = None + req_ctx: Context|None = None + req_cs: CancelScope|None = None + req_finished: trio.Event|None = None + req_err: BaseException|None = None + + lock_status: LockStatus|None = None + + _orig_sigint_handler: Callable|None = None + _trio_handler: ( + Callable[[int, FrameType|None], Any] + |int + | None + ) = None + + @classmethod + def repr(cls) -> str: + fields: str = ( + f'repl: {cls.repl}\n' + f'repl_task: {cls.repl_task}\n' + f'repl_release: {cls.repl_release}\n' + f'req_ctx: {cls.req_ctx}\n' + ) + body: str = textwrap.indent( + fields, + prefix=' |_', + ) + return ( + f'<{cls.__name__}(\n' + f'{body}' + ')>' + ) + + # TODO: how do you get this to work on a non-inited class? + # __repr__ = classmethod(repr) + # __str__ = classmethod(repr) + + @classmethod + def shield_sigint(cls): + ''' + Shield out SIGINT handling (which by default triggers + `Task` cancellation) in subactors when a `pdb` REPL + is active. + + Avoids cancellation of the current actor (task) when the user + mistakenly sends ctl-c or via a recevied signal (from an + external request). Explicit runtime cancel requests are + allowed until the current REPL-session (the blocking call + `Pdb.interaction()`) exits, normally via the 'continue' or + 'quit' command - at which point the orig SIGINT handler is + restored via `.unshield_sigint()` below. + + Impl notes: + ----------- + - we prefer that `trio`'s default handler is always used when + SIGINT is unshielded (hence disabling the `pdb.Pdb` + defaults in `mk_pdb()`) such that reliable KBI cancellation + is always enforced. + + - we always detect whether we're running from a non-main + thread, in which case schedule the SIGINT shielding override + to in the main thread as per, + + https://docs.python.org/3/library/signal.html#signals-and-threads + + ''' + from ._sigint import ( + sigint_shield, + ) + # + # XXX detect whether we're running from a non-main thread + # in which case schedule the SIGINT shielding override + # to in the main thread. + # https://docs.python.org/3/library/signal.html#signals-and-threads + if ( + not cls.is_main_trio_thread() + and + not _state._runtime_vars.get( + '_is_infected_aio', + False, + ) + ): + cls._orig_sigint_handler: Callable = trio.from_thread.run_sync( + signal.signal, + signal.SIGINT, + sigint_shield, + ) + + else: + cls._orig_sigint_handler = signal.signal( + signal.SIGINT, + sigint_shield, + ) + + @classmethod + @pdbp.hideframe # XXX NOTE XXX see below in `.pause_from_sync()` + def unshield_sigint(cls): + ''' + Un-shield SIGINT for REPL-active (su)bactor. + + See details in `.shield_sigint()`. + + ''' + # always restore ``trio``'s sigint handler. see notes below in + # the pdb factory about the nightmare that is that code swapping + # out the handler when the repl activates... + # if not cls.is_main_trio_thread(): + if ( + not cls.is_main_trio_thread() + and + not _state._runtime_vars.get( + '_is_infected_aio', + False, + ) + # not current_actor().is_infected_aio() + # ^XXX, since for bg-thr case will always raise.. 
+ ): + trio.from_thread.run_sync( + signal.signal, + signal.SIGINT, + cls._trio_handler, + ) + else: + trio_h: Callable = cls._trio_handler + # XXX should never really happen XXX + if not trio_h: + from ._repl import mk_pdb + mk_pdb().set_trace() + + signal.signal( + signal.SIGINT, + cls._trio_handler, + ) + + cls._orig_sigint_handler = None + + @classmethod + def is_main_trio_thread(cls) -> bool: + ''' + Check if we're the "main" thread (as in the first one + started by cpython) AND that it is ALSO the thread that + called `trio.run()` and not some thread spawned with + `trio.to_thread.run_sync()`. + + ''' + try: + async_lib: str = sniffio.current_async_library() + except sniffio.AsyncLibraryNotFoundError: + async_lib = None + + is_main_thread: bool = trio._util.is_main_thread() + # ^TODO, since this is private, @oremanj says + # we should just copy the impl for now..? + if is_main_thread: + thread_name: str = 'main' + else: + thread_name: str = threading.current_thread().name + + is_trio_main = ( + is_main_thread + and + (async_lib == 'trio') + ) + + report: str = f'Running thread: {thread_name!r}\n' + if async_lib: + report += ( + f'Current async-lib detected by `sniffio`: {async_lib}\n' + ) + else: + report += ( + 'No async-lib detected (by `sniffio`) ??\n' + ) + if not is_trio_main: + log.warning(report) + + return is_trio_main + # XXX apparently unreliable..see ^ + # ( + # threading.current_thread() + # is not threading.main_thread() + # ) + + @classmethod + def cancel(cls) -> bool: + if (req_cs := cls.req_cs): + req_cs.cancel() + return True + + return False + + # TODO, support @acm? + # -[ ] what about a return-proto for determining + # whether the REPL should be allowed to enage? + # -[x] consider factoring this `_repl_fixture` block into + # a common @cm somehow so it can be repurposed both here and + # in `._pause()`?? + # -[ ] we could also use the `ContextDecorator`-type in that + # case to simply decorate the `_enter_repl_sync()` closure? + # |_https://docs.python.org/3/library/contextlib.html#using-a-context-manager-as-a-function-decorator + @classmethod + def maybe_enter_repl_fixture( + cls, + # ^XXX **always provided** by the low-level REPL-invoker, + # - _post_mortem() + # - _pause() + repl: PdbREPL, + + # maybe pre/post REPL entry + repl_fixture: ( + AbstractContextManager[bool] + |None + ) = None, + + # if called from crashed context, provided by + # `open_crash_handler()` + boxed_maybe_exc: BoxedMaybeException|None = None, + ) -> bool: + ''' + Maybe open a pre/post REPL entry "fixture" `@cm` provided by the + user, the caller should use the delivered `bool` to determine + whether to engage the `PdbREPL`. 
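+
+        A rough sketch of a compatible fixture, as hypothetical user
+        code (assuming `from contextlib import contextmanager`):
+
+            @contextmanager
+            def my_repl_fixture(
+                repl: PdbREPL,
+                maybe_bxerr: BoxedMaybeException|None = None,
+            ):
+                # any pre-REPL setup
+                yield True  # yielding `False` blocks REPL entry
+                # any post-REPL teardown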
+ + ''' + if not ( + repl_fixture + or + (rt_repl_fixture := _state._runtime_vars.get('repl_fixture')) + ): + return True # YES always enter + + _repl_fixture = ( + repl_fixture + or + rt_repl_fixture + ) + enter_repl: bool = DebugStatus._fixture_stack.enter_context( + _repl_fixture( + repl=repl, + maybe_bxerr=boxed_maybe_exc, + ) + ) + if not enter_repl: + log.pdb( + f'pdbp-REPL blocked by a `repl_fixture()` which yielded `False` !\n' + f'repl_fixture: {repl_fixture}\n' + f'rt_repl_fixture: {rt_repl_fixture}\n' + ) + + log.devx( + f'User provided `repl_fixture` entered with,\n' + f'{repl_fixture!r} -> {enter_repl!r}\n' + ) + return enter_repl + + @classmethod + # @pdbp.hideframe + def release( + cls, + cancel_req_task: bool = False, + ): + repl_release: trio.Event = cls.repl_release + try: + # sometimes the task might already be terminated in + # which case this call will raise an RTE? + # See below for reporting on that.. + if ( + repl_release is not None + and + not repl_release.is_set() + ): + if cls.is_main_trio_thread(): + repl_release.set() + + elif ( + _state._runtime_vars.get( + '_is_infected_aio', + False, + ) + # ^XXX, again bc we need to not except + # but for bg-thread case it will always raise.. + # + # TODO, is there a better api then using + # `err_on_no_runtime=False` in the below? + # current_actor().is_infected_aio() + ): + async def _set_repl_release(): + repl_release.set() + + fute: asyncio.Future = run_trio_task_in_future( + _set_repl_release + ) + if not fute.done(): + log.warning('REPL release state unknown..?') + + else: + # XXX NOTE ONLY used for bg root-actor sync + # threads, see `.pause_from_sync()`. + trio.from_thread.run_sync( + repl_release.set + ) + + except RuntimeError as rte: + log.exception( + f'Failed to release debug-request ??\n\n' + f'{cls.repr()}\n' + ) + # pdbp.set_trace() + raise rte + + finally: + # if req_ctx := cls.req_ctx: + # req_ctx._scope.cancel() + if cancel_req_task: + cancelled: bool = cls.cancel() + if not cancelled: + log.warning( + 'Failed to cancel request task!?\n' + f'{cls.repl_task}\n' + ) + + # actor-local state, irrelevant for non-root. + cls.repl_task = None + + # XXX WARNING needs very special caughtion, and we should + # prolly make a more explicit `@property` API? + # + # - if unset in root multi-threaded case can cause + # issues with detecting that some root thread is + # using a REPL, + # + # - what benefit is there to unsetting, it's always + # set again for the next task in some actor.. + # only thing would be to avoid in the sigint-handler + # logging when we don't need to? + cls.repl = None + + # maybe restore original sigint handler + # XXX requires runtime check to avoid crash! + if current_actor(err_on_no_runtime=False): + cls.unshield_sigint() + + cls._fixture_stack.close() + + +# TODO: use the new `@lowlevel.singleton` for this! +def get_debug_req() -> DebugStatus|None: + return DebugStatus + + +async def request_root_stdio_lock( + actor_uid: tuple[str, str], + task_uid: tuple[str, int], + + shield: bool = False, + task_status: TaskStatus[CancelScope] = trio.TASK_STATUS_IGNORED, +): + ''' + Connect to the root actor for this actor's process tree and + RPC-invoke a task which acquires the std-streams global `Lock`: + a process-tree-global mutex which prevents multiple actors from + entering `PdbREPL.interaction()` at the same time such that the + parent TTY's stdio is never "clobbered" by simultaneous + reads/writes. 
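+
+    The request dialog, as sketched by the code below: open an IPC ctx
+    to the root's `lock_stdio_for_peer()`, wait for its
+    `Started(pld=LockStatus)` confirming the lock was acquired, block
+    until the local REPL session ends (signalled via
+    `DebugStatus.repl_release`), then send a `LockRelease` and await a
+    final `LockStatus` with `.locked == False`.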
+ + The actual `Lock` singleton instance exists ONLY in the root + actor's memory space and does nothing more then manage + process-tree global state, + namely a `._debug_lock: trio.FIFOLock`. + + The actual `PdbREPL` interaction/operation is completely isolated + to each sub-actor (process) with the root's `Lock` providing the + multi-process mutex-syncing mechanism to avoid parallel REPL + usage within an actor tree. + + ''' + log.devx( + 'Initing stdio-lock request task with root actor' + ) + # TODO: can we implement this mutex more generally as + # a `._sync.Lock`? + # -[ ] simply add the wrapping needed for the debugger specifics? + # - the `__pld_spec__` impl and maybe better APIs for the client + # vs. server side state tracking? (`Lock` + `DebugStatus`) + # -[ ] for eg. `mp` has a multi-proc lock via the manager + # - https://docs.python.org/3.8/library/multiprocessing.html#synchronization-primitives + # -[ ] technically we need a `RLock` since re-acquire should be a noop + # - https://docs.python.org/3.8/library/multiprocessing.html#multiprocessing.RLock + DebugStatus.req_finished = trio.Event() + DebugStatus.req_task = current_task() + req_err: BaseException|None = None + try: + from tractor._discovery import get_root + # NOTE: we need this to ensure that this task exits + # BEFORE the REPl instance raises an error like + # `bdb.BdbQuit` directly, OW you get a trio cs stack + # corruption! + # Further, the since this task is spawned inside the + # `Context._scope_nursery: trio.Nursery`, once an RPC + # task errors that cs is cancel_called and so if we want + # to debug the TPC task that failed we need to shield + # against that expected `.cancel()` call and instead + # expect all of the `PdbREPL`.set_[continue/quit/]()` + # methods to unblock this task by setting the + # `.repl_release: # trio.Event`. + with trio.CancelScope(shield=shield) as req_cs: + # XXX: was orig for debugging cs stack corruption.. + # log.devx( + # 'Request cancel-scope is:\n\n' + # f'{pformat_cs(req_cs, var_name="req_cs")}\n\n' + # ) + DebugStatus.req_cs = req_cs + req_ctx: Context|None = None + ctx_eg: BaseExceptionGroup|None = None + try: + # TODO: merge into single async with ? + async with get_root() as portal: + async with portal.open_context( + lock_stdio_for_peer, + subactor_task_uid=task_uid, + + # NOTE: set it here in the locker request task bc it's + # possible for multiple such requests for the lock in any + # single sub-actor AND there will be a race between when the + # root locking task delivers the `Started(pld=LockStatus)` + # and when the REPL is actually entered by the requesting + # application task who called + # `.pause()`/`.post_mortem()`. + # + # SO, applying the pld-spec here means it is only applied to + # this IPC-ctx request task, NOT any other task(s) + # including the one that actually enters the REPL. This + # is oc desired bc ow the debugged task will msg-type-error. + # pld_spec=__pld_spec__, + + ) as (req_ctx, status): + + DebugStatus.req_ctx = req_ctx + log.devx( + 'Subactor locked TTY with msg\n\n' + f'{status}\n' + ) + + # try: + if (locker := status.subactor_uid) != actor_uid: + raise DebugStateError( + f'Root actor locked by another peer !?\n' + f'locker: {locker!r}\n' + f'actor_uid: {actor_uid}\n' + ) + assert status.cid + # except AttributeError: + # log.exception('failed pldspec asserts!') + # mk_pdb().set_trace() + # raise + + # set last rxed lock dialog status. 
+ DebugStatus.lock_status = status + + async with req_ctx.open_stream() as stream: + task_status.started(req_ctx) + + # wait for local task to exit + # `PdbREPL.interaction()`, normally via + # a `DebugStatus.release()`call, and + # then unblock us here. + await DebugStatus.repl_release.wait() + await stream.send( + LockRelease( + subactor_uid=actor_uid, + cid=status.cid, + ) + ) + + # sync with child-side root locker task + # completion + status: LockStatus = await req_ctx.result() + assert not status.locked + DebugStatus.lock_status = status + + log.devx( + 'TTY lock was released for subactor with msg\n\n' + f'{status}\n\n' + f'Exitting {req_ctx.side!r}-side of locking req_ctx\n' + ) + + except* ( + tractor.ContextCancelled, + trio.Cancelled, + ) as _taskc_eg: + ctx_eg = _taskc_eg + log.cancel( + 'Debug lock request was CANCELLED?\n\n' + f'<=c) {req_ctx}\n' + # f'{pformat_cs(req_cs, var_name="req_cs")}\n\n' + # f'{pformat_cs(req_ctx._scope, var_name="req_ctx._scope")}\n\n' + ) + raise + + except* ( + BaseException, + ) as _ctx_eg: + ctx_eg = _ctx_eg + message: str = ( + 'Failed during debug request dialog with root actor?\n' + ) + if (req_ctx := DebugStatus.req_ctx): + message += ( + f'<=x)\n' + f' |_{req_ctx}\n' + f'Cancelling IPC ctx!\n' + ) + try: + await req_ctx.cancel() + except trio.ClosedResourceError as terr: + ctx_eg.add_note( + # f'Failed with {type(terr)!r} x)> `req_ctx.cancel()` ' + f'Failed with `req_ctx.cancel()` bool: + ''' + Predicate to determine if a reported child subactor in debug + is actually connected. + + Useful to detect stale `Lock` requests after IPC failure. + + ''' + actor: Actor = current_actor() + server: IPCServer = actor.ipc_server + + if not is_root_process(): + raise InternalError('This is a root-actor only API!') + + if ( + (ctx := Lock.ctx_in_debug) + and + (uid_in_debug := ctx.chan.uid) + ): + chans: list[tractor.Channel] = server._peers.get( + tuple(uid_in_debug) + ) + if chans: + return any( + chan.connected() + for chan in chans + ) + + return False diff --git a/tractor/devx/pformat.py b/tractor/devx/pformat.py index 1530ef02..b9e1ca48 100644 --- a/tractor/devx/pformat.py +++ b/tractor/devx/pformat.py @@ -19,6 +19,7 @@ Pretty formatters for use throughout the code base. Mostly handy for logging and exception message content. ''' +import sys import textwrap import traceback @@ -115,6 +116,85 @@ def pformat_boxed_tb( ) +def pformat_exc( + exc: Exception, + header: str = '', + message: str = '', + body: str = '', + with_type_header: bool = True, +) -> str: + + # XXX when the currently raised exception is this instance, + # we do not ever use the "type header" style repr. + is_being_raised: bool = False + if ( + (curr_exc := sys.exception()) + and + curr_exc is exc + ): + is_being_raised: bool = True + + with_type_header: bool = ( + with_type_header + and + not is_being_raised + ) + + # style + if ( + with_type_header + and + not header + ): + header: str = f'<{type(exc).__name__}(' + + message: str = ( + message + or + exc.message + ) + if message: + # split off the first line so, if needed, it isn't + # indented the same like the "boxed content" which + # since there is no `.tb_str` is just the `.message`. + lines: list[str] = message.splitlines() + first: str = lines[0] + message: str = message.removeprefix(first) + + # with a type-style header we, + # - have no special message "first line" extraction/handling + # - place the message a space in from the header: + # `MsgTypeError( ..` + # ^-here + # - indent the `.message` inside the type body. 
+ if with_type_header: + first = f' {first} )>' + + message: str = textwrap.indent( + message, + prefix=' '*2, + ) + message: str = first + message + + tail: str = '' + if ( + with_type_header + and + not message + ): + tail: str = '>' + + return ( + header + + + message + + + f'{body}' + + + tail + ) + + def pformat_caller_frame( stack_limit: int = 1, box_tb: bool = True, @@ -167,3 +247,104 @@ def pformat_cs( + fields ) + + +# TODO: move this func to some kinda `.devx.pformat.py` eventually +# as we work out our multi-domain state-flow-syntax! +def nest_from_op( + input_op: str, + # + # ?TODO? an idea for a syntax to the state of concurrent systems + # as a "3-domain" (execution, scope, storage) model and using + # a minimal ascii/utf-8 operator-set. + # + # try not to take any of this seriously yet XD + # + # > is a "play operator" indicating (CPU bound) + # exec/work/ops required at the "lowest level computing" + # + # execution primititves (tasks, threads, actors..) denote their + # lifetime with '(' and ')' since parentheses normally are used + # in many langs to denote function calls. + # + # starting = ( + # >( opening/starting; beginning of the thread-of-exec (toe?) + # (> opened/started, (finished spawning toe) + # |_ repr of toe, in py these look like + # + # >) closing/exiting/stopping, + # )> closed/exited/stopped, + # |_ + # [OR <), )< ?? ] + # + # ending = ) + # >c) cancelling to close/exit + # c)> cancelled (caused close), OR? + # |_ + # OR maybe "x) erroring to eventuall exit + # x)> errored and terminated + # |_ + # + # scopes: supers/nurseries, IPC-ctxs, sessions, perms, etc. + # >{ opening + # {> opened + # }> closed + # >} closing + # + # storage: like queues, shm-buffers, files, etc.. + # >[ opening + # [> opened + # |_ + # + # >] closing + # ]> closed + + # IPC ops: channels, transports, msging + # => req msg + # <= resp msg + # <=> 2-way streaming (of msgs) + # <- recv 1 msg + # -> send 1 msg + # + # TODO: still not sure on R/L-HS approach..? + # =>( send-req to exec start (task, actor, thread..) + # (<= recv-req to ^ + # + # (<= recv-req ^ + # <=( recv-resp opened remote exec primitive + # <=) recv-resp closed + # + # )<=c req to stop due to cancel + # c=>) req to stop due to cancel + # + # =>{ recv-req to open + # <={ send-status that it closed + + tree_str: str, + + # NOTE: so move back-from-the-left of the `input_op` by + # this amount. + back_from_op: int = 0, +) -> str: + ''' + Depth-increment the input (presumably hierarchy/supervision) + input "tree string" below the provided `input_op` execution + operator, so injecting a `"\n|_{input_op}\n"`and indenting the + `tree_str` to nest content aligned with the ops last char. + + ''' + return ( + f'{input_op}\n' + + + textwrap.indent( + tree_str, + prefix=( + len(input_op) + - + (back_from_op + 1) + ) * ' ', + ) + ) diff --git a/tractor/experimental/_pubsub.py b/tractor/experimental/_pubsub.py index b894ed49..bc5881e1 100644 --- a/tractor/experimental/_pubsub.py +++ b/tractor/experimental/_pubsub.py @@ -45,6 +45,8 @@ __all__ = ['pub'] log = get_logger('messaging') +# TODO! this needs to reworked to use the modern +# `Context`/`MsgStream` APIs!! async def fan_out_to_ctxs( pub_async_gen_func: typing.Callable, # it's an async gen ... gd mypy topics2ctxs: dict[str, list], diff --git a/tractor/ipc/__init__.py b/tractor/ipc/__init__.py new file mode 100644 index 00000000..2c6c3b5d --- /dev/null +++ b/tractor/ipc/__init__.py @@ -0,0 +1,24 @@ +# tractor: structured concurrent "actors". 
+# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +''' +A modular IPC layer supporting the power of cross-process SC! + +''' +from ._chan import ( + _connect_chan as _connect_chan, + Channel as Channel +) diff --git a/tractor/ipc/_chan.py b/tractor/ipc/_chan.py new file mode 100644 index 00000000..2c3374c2 --- /dev/null +++ b/tractor/ipc/_chan.py @@ -0,0 +1,457 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +""" +Inter-process comms abstractions + +""" +from __future__ import annotations +from collections.abc import AsyncGenerator +from contextlib import ( + asynccontextmanager as acm, + contextmanager as cm, +) +import platform +from pprint import pformat +import typing +from typing import ( + Any, + TYPE_CHECKING, +) +import warnings + +import trio + +from ._types import ( + transport_from_addr, + transport_from_stream, +) +from tractor._addr import ( + is_wrapped_addr, + wrap_address, + Address, + UnwrappedAddress, +) +from tractor.log import get_logger +from tractor._exceptions import ( + MsgTypeError, + pack_from_raise, + TransportClosed, +) +from tractor.msg import ( + Aid, + MsgCodec, +) + +if TYPE_CHECKING: + from ._transport import MsgTransport + + +log = get_logger(__name__) + +_is_windows = platform.system() == 'Windows' + + +class Channel: + ''' + An inter-process channel for communication between (remote) actors. + + Wraps a ``MsgStream``: transport + encoding IPC connection. + + Currently we only support ``trio.SocketStream`` for transport + (aka TCP) and the ``msgpack`` interchange format via the ``msgspec`` + codec libary. + + ''' + def __init__( + + self, + transport: MsgTransport|None = None, + # TODO: optional reconnection support? + # auto_reconnect: bool = False, + # on_reconnect: typing.Callable[..., typing.Awaitable] = None, + + ) -> None: + + # self._recon_seq = on_reconnect + # self._autorecon = auto_reconnect + + # Either created in ``.connect()`` or passed in by + # user in ``.from_stream()``. + self._transport: MsgTransport|None = transport + + # set after handshake - always info from peer end + self.aid: Aid|None = None + + self._aiter_msgs = self._iter_msgs() + self._exc: Exception|None = None + # ^XXX! 
ONLY set if a remote actor sends an `Error`-msg + self._closed: bool = False + + # flag set by ``Portal.cancel_actor()`` indicating remote + # (possibly peer) cancellation of the far end actor + # runtime. + self._cancel_called: bool = False + + @property + def uid(self) -> tuple[str, str]: + ''' + Peer actor's unique id. + + ''' + msg: str = ( + f'`{type(self).__name__}.uid` is now deprecated.\n' + 'Use the new `.aid: tractor.msg.Aid` (struct) instead ' + 'which also provides additional named (optional) fields ' + 'beyond just the `.name` and `.uuid`.' + ) + warnings.warn( + msg, + DeprecationWarning, + stacklevel=2, + ) + peer_aid: Aid = self.aid + return ( + peer_aid.name, + peer_aid.uuid, + ) + + @property + def stream(self) -> trio.abc.Stream | None: + return self._transport.stream if self._transport else None + + @property + def msgstream(self) -> MsgTransport: + log.info( + '`Channel.msgstream` is an old name, use `._transport`' + ) + return self._transport + + @property + def transport(self) -> MsgTransport: + return self._transport + + @classmethod + def from_stream( + cls, + stream: trio.abc.Stream, + ) -> Channel: + transport_cls = transport_from_stream(stream) + return Channel( + transport=transport_cls(stream) + ) + + @classmethod + async def from_addr( + cls, + addr: UnwrappedAddress, + **kwargs + ) -> Channel: + + if not is_wrapped_addr(addr): + addr: Address = wrap_address(addr) + + transport_cls = transport_from_addr(addr) + transport = await transport_cls.connect_to( + addr, + **kwargs, + ) + assert transport.raddr == addr + chan = Channel(transport=transport) + log.runtime( + f'Connected channel IPC transport\n' + f'[>\n' + f' |_{chan}\n' + ) + return chan + + @cm + def apply_codec( + self, + codec: MsgCodec, + ) -> None: + ''' + Temporarily override the underlying IPC msg codec for + dynamic enforcement of messaging schema. + + ''' + orig: MsgCodec = self._transport.codec + try: + self._transport.codec = codec + yield + finally: + self._transport.codec = orig + + # TODO: do a .src/.dst: str for maddrs? + def pformat(self) -> str: + if not self._transport: + return '' + + tpt: MsgTransport = self._transport + tpt_name: str = type(tpt).__name__ + tpt_status: str = ( + 'connected' if self.connected() + else 'closed' + ) + return ( + f'\n' + ) + + # NOTE: making this return a value that can be passed to + # `eval()` is entirely **optional** FYI! + # https://docs.python.org/3/library/functions.html#repr + # https://docs.python.org/3/reference/datamodel.html#object.__repr__ + # + # Currently we target **readability** from a (console) + # logging perspective over `eval()`-ability since we do NOT + # target serializing non-struct instances! + # def __repr__(self) -> str: + __str__ = pformat + __repr__ = pformat + + @property + def laddr(self) -> Address|None: + return self._transport.laddr if self._transport else None + + @property + def raddr(self) -> Address|None: + return self._transport.raddr if self._transport else None + + # TODO: something like, + # `pdbp.hideframe_on(errors=[MsgTypeError])` + # instead of the `try/except` hack we have rn.. + # seems like a pretty useful thing to have in general + # along with being able to filter certain stack frame(s / sets) + # possibly based on the current log-level? + async def send( + self, + payload: Any, + + hide_tb: bool = True, + + ) -> None: + ''' + Send a coded msg-blob over the transport. 
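+
+        The `hide_tb` flag controls whether this frame is hidden from
+        any raised traceback (via `__tracebackhide__`).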
+ + ''' + __tracebackhide__: bool = hide_tb + try: + log.transport( + '=> send IPC msg:\n\n' + f'{pformat(payload)}\n' + ) + # assert self._transport # but why typing? + await self._transport.send( + payload, + hide_tb=hide_tb, + ) + except ( + BaseException, + MsgTypeError, + TransportClosed, + )as _err: + err = _err # bind for introspection + match err: + case MsgTypeError(): + try: + assert err.cid + except KeyError: + raise err + case TransportClosed(): + log.transport( + f'Transport stream closed due to\n' + f'{err.repr_src_exc()}\n' + ) + + case _: + # never suppress non-tpt sources + __tracebackhide__: bool = False + raise + + async def recv(self) -> Any: + assert self._transport + return await self._transport.recv() + + # TODO: auto-reconnect features like 0mq/nanomsg? + # -[ ] implement it manually with nods to SC prot + # possibly on multiple transport backends? + # -> seems like that might be re-inventing scalability + # prots tho no? + # try: + # return await self._transport.recv() + # except trio.BrokenResourceError: + # if self._autorecon: + # await self._reconnect() + # return await self.recv() + # raise + + async def aclose(self) -> None: + + log.transport( + f'Closing channel to {self.aid} ' + f'{self.laddr} -> {self.raddr}' + ) + assert self._transport + await self._transport.stream.aclose() + self._closed = True + + async def __aenter__(self): + await self.connect() + return self + + async def __aexit__(self, *args): + await self.aclose(*args) + + def __aiter__(self): + return self._aiter_msgs + + # ?TODO? run any reconnection sequence? + # -[ ] prolly should be impl-ed as deco-API? + # + # async def _reconnect(self) -> None: + # """Handle connection failures by polling until a reconnect can be + # established. + # """ + # down = False + # while True: + # try: + # with trio.move_on_after(3) as cancel_scope: + # await self.connect() + # cancelled = cancel_scope.cancelled_caught + # if cancelled: + # log.transport( + # "Reconnect timed out after 3 seconds, retrying...") + # continue + # else: + # log.transport("Stream connection re-established!") + + # # on_recon = self._recon_seq + # # if on_recon: + # # await on_recon(self) + + # break + # except (OSError, ConnectionRefusedError): + # if not down: + # down = True + # log.transport( + # f"Connection to {self.raddr} went down, waiting" + # " for re-establishment") + # await trio.sleep(1) + + async def _iter_msgs( + self + ) -> AsyncGenerator[Any, None]: + ''' + Yield `MsgType` IPC msgs decoded and deliverd from + an underlying `MsgTransport` protocol. + + This is a streaming routine alo implemented as an async-gen + func (same a `MsgTransport._iter_pkts()`) gets allocated by + a `.__call__()` inside `.__init__()` where it is assigned to + the `._aiter_msgs` attr. + + ''' + assert self._transport + while True: + try: + async for msg in self._transport: + match msg: + # NOTE: if transport/interchange delivers + # a type error, we pack it with the far + # end peer `Actor.uid` and relay the + # `Error`-msg upward to the `._rpc` stack + # for normal RAE handling. 
+ case MsgTypeError(): + yield pack_from_raise( + local_err=msg, + cid=msg.cid, + + # XXX we pack it here bc lower + # layers have no notion of an + # actor-id ;) + src_uid=self.uid, + ) + case _: + yield msg + + except trio.BrokenResourceError: + + # if not self._autorecon: + raise + + await self.aclose() + + # if self._autorecon: # attempt reconnect + # await self._reconnect() + # continue + + def connected(self) -> bool: + return self._transport.connected() if self._transport else False + + async def _do_handshake( + self, + aid: Aid, + + ) -> Aid: + ''' + Exchange `(name, UUIDs)` identifiers as the first + communication step with any (peer) remote `Actor`. + + These are essentially the "mailbox addresses" found in + "actor model" parlance. + + ''' + await self.send(aid) + peer_aid: Aid = await self.recv() + log.runtime( + f'Received hanshake with peer actor,\n' + f'{peer_aid}\n' + ) + # NOTE, we always are referencing the remote peer! + self.aid = peer_aid + return peer_aid + + +@acm +async def _connect_chan( + addr: UnwrappedAddress +) -> typing.AsyncGenerator[Channel, None]: + ''' + Create and connect a channel with disconnect on context manager + teardown. + + ''' + chan = await Channel.from_addr(addr) + yield chan + with trio.CancelScope(shield=True): + await chan.aclose() diff --git a/tractor/ipc/_fd_share.py b/tractor/ipc/_fd_share.py new file mode 100644 index 00000000..e51069ba --- /dev/null +++ b/tractor/ipc/_fd_share.py @@ -0,0 +1,163 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +''' +File-descriptor-sharing on `linux` by "wilhelm_of_bohemia". 
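+
+Shares the fds backing a ring buffer between actors by passing them as
+`SCM_RIGHTS` ancillary data over a one-shot unix domain socket.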
+ +''' +from __future__ import annotations +import os +import array +import socket +import tempfile +from pathlib import Path +from contextlib import ExitStack + +import trio +import tractor +from tractor.ipc import RBToken + + +actor_name = 'ringd' + + +_rings: dict[str, dict] = {} + + +async def _attach_to_ring( + ring_name: str +) -> tuple[int, int, int]: + actor = tractor.current_actor() + + fd_amount = 3 + sock_path = ( + Path(tempfile.gettempdir()) + / + f'{os.getpid()}-pass-ring-fds-{ring_name}-to-{actor.name}.sock' + ) + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.bind(sock_path) + sock.listen(1) + + async with ( + tractor.find_actor(actor_name) as ringd, + ringd.open_context( + _pass_fds, + name=ring_name, + sock_path=sock_path + ) as (ctx, _sent) + ): + # prepare array to receive FD + fds = array.array("i", [0] * fd_amount) + + conn, _ = sock.accept() + + # receive FD + msg, ancdata, flags, addr = conn.recvmsg( + 1024, + socket.CMSG_LEN(fds.itemsize * fd_amount) + ) + + for ( + cmsg_level, + cmsg_type, + cmsg_data, + ) in ancdata: + if ( + cmsg_level == socket.SOL_SOCKET + and + cmsg_type == socket.SCM_RIGHTS + ): + fds.frombytes(cmsg_data[:fds.itemsize * fd_amount]) + break + else: + raise RuntimeError("Receiver: No FDs received") + + conn.close() + sock.close() + sock_path.unlink() + + return RBToken.from_msg( + await ctx.wait_for_result() + ) + + +@tractor.context +async def _pass_fds( + ctx: tractor.Context, + name: str, + sock_path: str +) -> RBToken: + global _rings + token = _rings[name] + client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + client.connect(sock_path) + await ctx.started() + fds = array.array('i', token.fds) + client.sendmsg([b'FDs'], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + client.close() + return token + + +@tractor.context +async def _open_ringbuf( + ctx: tractor.Context, + name: str, + buf_size: int +) -> RBToken: + global _rings + is_owner = False + if name not in _rings: + stack = ExitStack() + token = stack.enter_context( + tractor.open_ringbuf( + name, + buf_size=buf_size + ) + ) + _rings[name] = { + 'token': token, + 'stack': stack, + } + is_owner = True + + ring = _rings[name] + await ctx.started() + + try: + await trio.sleep_forever() + + except tractor.ContextCancelled: + ... + + finally: + if is_owner: + ring['stack'].close() + + +async def open_ringbuf( + name: str, + buf_size: int +) -> RBToken: + async with ( + tractor.find_actor(actor_name) as ringd, + ringd.open_context( + _open_ringbuf, + name=name, + buf_size=buf_size + ) as (rd_ctx, _) + ): + yield await _attach_to_ring(name) + await rd_ctx.cancel() diff --git a/tractor/ipc/_linux.py b/tractor/ipc/_linux.py new file mode 100644 index 00000000..88d80d1c --- /dev/null +++ b/tractor/ipc/_linux.py @@ -0,0 +1,153 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+''' +Linux specifics, for now we are only exposing EventFD + +''' +import os +import errno + +import cffi +import trio + +ffi = cffi.FFI() + +# Declare the C functions and types we plan to use. +# - eventfd: for creating the event file descriptor +# - write: for writing to the file descriptor +# - read: for reading from the file descriptor +# - close: for closing the file descriptor +ffi.cdef( + ''' + int eventfd(unsigned int initval, int flags); + + ssize_t write(int fd, const void *buf, size_t count); + ssize_t read(int fd, void *buf, size_t count); + + int close(int fd); + ''' +) + + +# Open the default dynamic library (essentially 'libc' in most cases) +C = ffi.dlopen(None) + + +# Constants from , if needed. +EFD_SEMAPHORE = 1 +EFD_CLOEXEC = 0o2000000 +EFD_NONBLOCK = 0o4000 + + +def open_eventfd(initval: int = 0, flags: int = 0) -> int: + ''' + Open an eventfd with the given initial value and flags. + Returns the file descriptor on success, otherwise raises OSError. + + ''' + fd = C.eventfd(initval, flags) + if fd < 0: + raise OSError(errno.errorcode[ffi.errno], 'eventfd failed') + return fd + + +def write_eventfd(fd: int, value: int) -> int: + ''' + Write a 64-bit integer (uint64_t) to the eventfd's counter. + + ''' + # Create a uint64_t* in C, store `value` + data_ptr = ffi.new('uint64_t *', value) + + # Call write(fd, data_ptr, 8) + # We expect to write exactly 8 bytes (sizeof(uint64_t)) + ret = C.write(fd, data_ptr, 8) + if ret < 0: + raise OSError(errno.errorcode[ffi.errno], 'write to eventfd failed') + return ret + + +def read_eventfd(fd: int) -> int: + ''' + Read a 64-bit integer (uint64_t) from the eventfd, returning the value. + Reading resets the counter to 0 (unless using EFD_SEMAPHORE). + + ''' + # Allocate an 8-byte buffer in C for reading + buf = ffi.new('char[]', 8) + + ret = C.read(fd, buf, 8) + if ret < 0: + raise OSError(errno.errorcode[ffi.errno], 'read from eventfd failed') + # Convert the 8 bytes we read into a Python integer + data_bytes = ffi.unpack(buf, 8) # returns a Python bytes object of length 8 + value = int.from_bytes(data_bytes, byteorder='little', signed=False) + return value + + +def close_eventfd(fd: int) -> int: + ''' + Close the eventfd. + + ''' + ret = C.close(fd) + if ret < 0: + raise OSError(errno.errorcode[ffi.errno], 'close failed') + + +class EventFD: + ''' + Use a previously opened eventfd(2), meant to be used in + sub-actors after root actor opens the eventfds then passes + them through pass_fds + + ''' + + def __init__( + self, + fd: int, + omode: str + ): + self._fd: int = fd + self._omode: str = omode + self._fobj = None + + @property + def fd(self) -> int | None: + return self._fd + + def write(self, value: int) -> int: + return write_eventfd(self._fd, value) + + async def read(self) -> int: + return await trio.to_thread.run_sync( + read_eventfd, self._fd, + abandon_on_cancel=True + ) + + def open(self): + self._fobj = os.fdopen(self._fd, self._omode) + + def close(self): + if self._fobj: + self._fobj.close() + + def __enter__(self): + self.open() + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() diff --git a/tractor/ipc/_mp_bs.py b/tractor/ipc/_mp_bs.py new file mode 100644 index 00000000..e51aa9ae --- /dev/null +++ b/tractor/ipc/_mp_bs.py @@ -0,0 +1,45 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. 
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+'''
+Utils to tame mp non-SC madness
+
+'''
+def disable_mantracker():
+    '''
+    Disable all ``multiprocessing`` "resource tracking" machinery since
+    it's an absolute multi-threaded mess of non-SC madness.
+
+    '''
+    from multiprocessing import resource_tracker as mantracker
+
+    # Tell the "resource tracker" thing to fuck off.
+    class ManTracker(mantracker.ResourceTracker):
+        def register(self, name, rtype):
+            pass
+
+        def unregister(self, name, rtype):
+            pass
+
+        def ensure_running(self):
+            pass
+
+    # "know your land and know your prey"
+    # https://www.dailymotion.com/video/x6ozzco
+    mantracker._resource_tracker = ManTracker()
+    mantracker.register = mantracker._resource_tracker.register
+    mantracker.ensure_running = mantracker._resource_tracker.ensure_running
+    mantracker.unregister = mantracker._resource_tracker.unregister
+    mantracker.getfd = mantracker._resource_tracker.getfd
diff --git a/tractor/ipc/_ringbuf.py b/tractor/ipc/_ringbuf.py
new file mode 100644
index 00000000..6337eea1
--- /dev/null
+++ b/tractor/ipc/_ringbuf.py
@@ -0,0 +1,253 @@
+# tractor: structured concurrent "actors".
+# Copyright 2018-eternity Tyler Goodlet.
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+''' +IPC Reliable RingBuffer implementation + +''' +from __future__ import annotations +from contextlib import contextmanager as cm +from multiprocessing.shared_memory import SharedMemory + +import trio +from msgspec import ( + Struct, + to_builtins +) + +from ._linux import ( + EFD_NONBLOCK, + open_eventfd, + EventFD +) +from ._mp_bs import disable_mantracker + + +disable_mantracker() + + +class RBToken(Struct, frozen=True): + ''' + RingBuffer token contains necesary info to open the two + eventfds and the shared memory + + ''' + shm_name: str + write_eventfd: int + wrap_eventfd: int + buf_size: int + + def as_msg(self): + return to_builtins(self) + + @classmethod + def from_msg(cls, msg: dict) -> RBToken: + if isinstance(msg, RBToken): + return msg + + return RBToken(**msg) + + +@cm +def open_ringbuf( + shm_name: str, + buf_size: int = 10 * 1024, + write_efd_flags: int = 0, + wrap_efd_flags: int = 0 +) -> RBToken: + shm = SharedMemory( + name=shm_name, + size=buf_size, + create=True + ) + try: + token = RBToken( + shm_name=shm_name, + write_eventfd=open_eventfd(flags=write_efd_flags), + wrap_eventfd=open_eventfd(flags=wrap_efd_flags), + buf_size=buf_size + ) + yield token + + finally: + shm.unlink() + + +class RingBuffSender(trio.abc.SendStream): + ''' + IPC Reliable Ring Buffer sender side implementation + + `eventfd(2)` is used for wrap around sync, and also to signal + writes to the reader. + + ''' + def __init__( + self, + token: RBToken, + start_ptr: int = 0, + ): + token = RBToken.from_msg(token) + self._shm = SharedMemory( + name=token.shm_name, + size=token.buf_size, + create=False + ) + self._write_event = EventFD(token.write_eventfd, 'w') + self._wrap_event = EventFD(token.wrap_eventfd, 'r') + self._ptr = start_ptr + + @property + def key(self) -> str: + return self._shm.name + + @property + def size(self) -> int: + return self._shm.size + + @property + def ptr(self) -> int: + return self._ptr + + @property + def write_fd(self) -> int: + return self._write_event.fd + + @property + def wrap_fd(self) -> int: + return self._wrap_event.fd + + async def send_all(self, data: bytes | bytearray | memoryview): + # while data is larger than the remaining buf + target_ptr = self.ptr + len(data) + while target_ptr > self.size: + # write all bytes that fit + remaining = self.size - self.ptr + self._shm.buf[self.ptr:] = data[:remaining] + # signal write and wait for reader wrap around + self._write_event.write(remaining) + await self._wrap_event.read() + + # wrap around and trim already written bytes + self._ptr = 0 + data = data[remaining:] + target_ptr = self._ptr + len(data) + + # remaining data fits on buffer + self._shm.buf[self.ptr:target_ptr] = data + self._write_event.write(len(data)) + self._ptr = target_ptr + + async def wait_send_all_might_not_block(self): + raise NotImplementedError + + async def aclose(self): + self._write_event.close() + self._wrap_event.close() + self._shm.close() + + async def __aenter__(self): + self._write_event.open() + self._wrap_event.open() + return self + + +class RingBuffReceiver(trio.abc.ReceiveStream): + ''' + IPC Reliable Ring Buffer receiver side implementation + + `eventfd(2)` is used for wrap around sync, and also to signal + writes to the reader. 
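+
+    Rough usage sketch; in practice the `RBToken` is created by the
+    buffer's owner process (via `open_ringbuf()`) and its fds are
+    passed to this side:
+
+        async with RingBuffReceiver(token) as rx:
+            while True:
+                segment: memoryview = await rx.receive_some()
+                ...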
+ + ''' + def __init__( + self, + token: RBToken, + start_ptr: int = 0, + flags: int = 0 + ): + token = RBToken.from_msg(token) + self._shm = SharedMemory( + name=token.shm_name, + size=token.buf_size, + create=False + ) + self._write_event = EventFD(token.write_eventfd, 'w') + self._wrap_event = EventFD(token.wrap_eventfd, 'r') + self._ptr = start_ptr + self._flags = flags + + @property + def key(self) -> str: + return self._shm.name + + @property + def size(self) -> int: + return self._shm.size + + @property + def ptr(self) -> int: + return self._ptr + + @property + def write_fd(self) -> int: + return self._write_event.fd + + @property + def wrap_fd(self) -> int: + return self._wrap_event.fd + + async def receive_some( + self, + max_bytes: int | None = None, + nb_timeout: float = 0.1 + ) -> memoryview: + # if non blocking eventfd enabled, do polling + # until next write, this allows signal handling + if self._flags | EFD_NONBLOCK: + delta = None + while delta is None: + try: + delta = await self._write_event.read() + + except OSError as e: + if e.errno == 'EAGAIN': + continue + + raise e + + else: + delta = await self._write_event.read() + + # fetch next segment and advance ptr + next_ptr = self._ptr + delta + segment = self._shm.buf[self._ptr:next_ptr] + self._ptr = next_ptr + + if self.ptr == self.size: + # reached the end, signal wrap around + self._ptr = 0 + self._wrap_event.write(1) + + return segment + + async def aclose(self): + self._write_event.close() + self._wrap_event.close() + self._shm.close() + + async def __aenter__(self): + self._write_event.open() + self._wrap_event.open() + return self diff --git a/tractor/ipc/_server.py b/tractor/ipc/_server.py new file mode 100644 index 00000000..a8732c10 --- /dev/null +++ b/tractor/ipc/_server.py @@ -0,0 +1,1071 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +''' +High-level "IPC server" encapsulation for all your +multi-transport-protcol needs! + +''' +from __future__ import annotations +from collections import defaultdict +from contextlib import ( + asynccontextmanager as acm, +) +from functools import partial +from itertools import chain +import inspect +from pprint import pformat +from types import ( + ModuleType, +) +from typing import ( + Callable, + TYPE_CHECKING, +) + +import trio +from trio import ( + EventStatistics, + Nursery, + TaskStatus, + SocketListener, +) + +# from ..devx import debug +from .._exceptions import ( + TransportClosed, +) +from .. import _rpc +from ..msg import ( + MsgType, + Struct, + types as msgtypes, +) +from ..trionics import maybe_open_nursery +from .. 
import ( + _state, + log, +) +from .._addr import Address +from ._chan import Channel +from ._transport import MsgTransport +from ._uds import UDSAddress +from ._tcp import TCPAddress + +if TYPE_CHECKING: + from .._runtime import Actor + from .._supervise import ActorNursery + + +log = log.get_logger(__name__) + + +async def maybe_wait_on_canced_subs( + uid: tuple[str, str], + chan: Channel, + disconnected: bool, + + actor: Actor|None = None, + chan_drain_timeout: float = 0.5, + an_exit_timeout: float = 0.5, + +) -> ActorNursery|None: + ''' + When a process-local actor-nursery is found for the given actor + `uid` (i.e. that peer is **also** a subactor of this parent), we + attempt to (with timeouts) wait on, + + - all IPC msgs to drain on the (common) `Channel` such that all + local `Context`-parent-tasks can also gracefully collect + `ContextCancelled` msgs from their respective remote children + vs. a `chan_drain_timeout`. + + - the actor-nursery to cancel-n-join all its supervised children + (processes) *gracefully* vs. a `an_exit_timeout` and thus also + detect cases where the IPC transport connection broke but + a sub-process is detected as still alive (a case that happens + when the subactor is still in an active debugger REPL session). + + If the timeout expires in either case we ofc report with warning. + + ''' + actor = actor or _state.current_actor() + + # XXX running outside actor-runtime usage, + # - unit testing + # - possibly manual usage (eventually) ? + if not actor: + return None + + local_nursery: ( + ActorNursery|None + ) = actor._actoruid2nursery.get(uid) + + # This is set in `Portal.cancel_actor()`. So if + # the peer was cancelled we try to wait for them + # to tear down their side of the connection before + # moving on with closing our own side. + if ( + local_nursery + and ( + actor._cancel_called + or + chan._cancel_called + ) + # + # ^-TODO-^ along with this is there another condition + # that we should filter with to avoid entering this + # waiting block needlessly? + # -[ ] maybe `and local_nursery.cancelled` and/or + # only if the `._children` table is empty or has + # only `Portal`s with .chan._cancel_called == + # True` as per what we had below; the MAIN DIFF + # BEING that just bc one `Portal.cancel_actor()` + # was called, doesn't mean the whole actor-nurse + # is gonna exit any time soon right!? + # + # or + # all(chan._cancel_called for chan in chans) + + ): + log.cancel( + 'Waiting on cancel request to peer..\n' + f'c)=>\n' + f' |_{chan.aid}\n' + ) + + # XXX: this is a soft wait on the channel (and its + # underlying transport protocol) to close from the + # remote peer side since we presume that any channel + # which is mapped to a sub-actor (i.e. it's managed + # by local actor-nursery) has a message that is sent + # to the peer likely by this actor (which may be in + # a shutdown sequence due to cancellation) when the + # local runtime here is now cancelled while + # (presumably) in the middle of msg loop processing. + chan_info: str = ( + f'{chan.aid}\n' + f'|_{chan}\n' + f' |_{chan.transport}\n\n' + ) + with trio.move_on_after(chan_drain_timeout) as drain_cs: + drain_cs.shield = True + + # attempt to wait for the far end to close the + # channel and bail after timeout (a 2-generals + # problem on closure). + assert chan.transport + async for msg in chan.transport.drain(): + + # try to deliver any lingering msgs + # before we destroy the channel. 
+ # This accomplishes deterministic + # ``Portal.cancel_actor()`` cancellation by + # making sure any RPC response to that call is + # delivered the local calling task. + # TODO: factor this into a helper? + log.warning( + 'Draining msg from disconnected peer\n' + f'{chan_info}' + f'{pformat(msg)}\n' + ) + # cid: str|None = msg.get('cid') + cid: str|None = msg.cid + if cid: + # deliver response to local caller/waiter + await actor._deliver_ctx_payload( + chan, + cid, + msg, + ) + if drain_cs.cancelled_caught: + log.warning( + 'Timed out waiting on IPC transport channel to drain?\n' + f'{chan_info}' + ) + + # XXX NOTE XXX when no explicit call to + # `open_root_actor()` was made by the application + # (normally we implicitly make that call inside + # the first `.open_nursery()` in root-actor + # user/app code), we can assume that either we + # are NOT the root actor or are root but the + # runtime was started manually. and thus DO have + # to wait for the nursery-enterer to exit before + # shutting down the local runtime to avoid + # clobbering any ongoing subactor + # teardown/debugging/graceful-cancel. + # + # see matching note inside `._supervise.open_nursery()` + # + # TODO: should we have a separate cs + timeout + # block here? + if ( + # XXX SO either, + # - not root OR, + # - is root but `open_root_actor()` was + # entered manually (in which case we do + # the equiv wait there using the + # `devx.debug` sub-sys APIs). + not local_nursery._implicit_runtime_started + ): + log.runtime( + 'Waiting on local actor nursery to exit..\n' + f'|_{local_nursery}\n' + ) + with trio.move_on_after(an_exit_timeout) as an_exit_cs: + an_exit_cs.shield = True + await local_nursery.exited.wait() + + # TODO: currently this is always triggering for every + # sub-daemon spawned from the `piker.services._mngr`? + # -[ ] how do we ensure that the IPC is supposed to + # be long lived and isn't just a register? + # |_ in the register case how can we signal that the + # ephemeral msg loop was intentional? + if ( + # not local_nursery._implicit_runtime_started + # and + an_exit_cs.cancelled_caught + ): + report: str = ( + 'Timed out waiting on local actor-nursery to exit?\n' + f'c)>\n' + f' |_{local_nursery}\n' + ) + if children := local_nursery._children: + # indent from above local-nurse repr + report += ( + f' |_{pformat(children)}\n' + ) + + log.warning(report) + + if disconnected: + # if the transport died and this actor is still + # registered within a local nursery, we report + # that the IPC layer may have failed + # unexpectedly since it may be the cause of + # other downstream errors. + entry: tuple|None = local_nursery._children.get(uid) + if entry: + proc: trio.Process + _, proc, _ = entry + + if ( + (poll := getattr(proc, 'poll', None)) + and + poll() is None # proc still alive + ): + # TODO: change log level based on + # detecting whether chan was created for + # ephemeral `.register_actor()` request! + # -[ ] also, that should be avoidable by + # re-using any existing chan from the + # `._discovery.get_registry()` call as + # well.. + log.runtime( + f'Peer IPC broke but subproc is alive?\n\n' + + f'<=x {chan.aid}@{chan.raddr}\n' + f' |_{proc}\n' + ) + + return local_nursery + +# TODO multi-tpt support with per-proto peer tracking? +# +# -[x] maybe change to mod-func and rename for implied +# multi-transport semantics? +# -[ ] register each stream/tpt/chan with the owning `Endpoint` +# so that we can query per tpt all peer contact infos? 
+# |_[ ] possibly provide a global viewing via a +# `collections.ChainMap`? +# +async def handle_stream_from_peer( + stream: trio.SocketStream, + + *, + server: IPCServer, + +) -> None: + ''' + Top-level `trio.abc.Stream` (i.e. normally `trio.SocketStream`) + handler-callback as spawn-invoked by `trio.serve_listeners()`. + + Note that each call to this handler is as a spawned task inside + any `IPCServer.listen_on()` passed `stream_handler_tn: Nursery` + such that it is invoked as, + + Endpoint.stream_handler_tn.start_soon( + handle_stream, + stream, + ) + + ''' + server._no_more_peers = trio.Event() # unset by making new + + # TODO, debug_mode tooling for when hackin this lower layer? + # with debug.maybe_open_crash_handler( + # pdb=True, + # ) as boxerr: + + chan = Channel.from_stream(stream) + con_status: str = ( + 'New inbound IPC connection <=\n' + f'|_{chan}\n' + ) + + # initial handshake with peer phase + try: + if actor := _state.current_actor(): + peer_aid: msgtypes.Aid = await chan._do_handshake( + aid=actor.aid, + ) + except ( + TransportClosed, + # ^XXX NOTE, the above wraps `trio` exc types raised + # during various `SocketStream.send/receive_xx()` calls + # under different fault conditions such as, + # + # trio.BrokenResourceError, + # trio.ClosedResourceError, + # + # Inside our `.ipc._transport` layer we absorb and + # re-raise our own `TransportClosed` exc such that this + # higher level runtime code can only worry one + # "kinda-error" that we expect to tolerate during + # discovery-sys related pings, queires, DoS etc. + ): + # XXX: This may propagate up from `Channel._aiter_recv()` + # and `MsgpackStream._inter_packets()` on a read from the + # stream particularly when the runtime is first starting up + # inside `open_root_actor()` where there is a check for + # a bound listener on the "arbiter" addr. the reset will be + # because the handshake was never meant took place. + log.runtime( + con_status + + + ' -> But failed to handshake? Ignoring..\n' + ) + return + + uid: tuple[str, str] = ( + peer_aid.name, + peer_aid.uuid, + ) + # TODO, can we make this downstream peer tracking use the + # `peer_aid` instead? + familiar: str = 'new-peer' + if _pre_chan := server._peers.get(uid): + familiar: str = 'pre-existing-peer' + uid_short: str = f'{uid[0]}[{uid[1][-6:]}]' + con_status += ( + f' -> Handshake with {familiar} `{uid_short}` complete\n' + ) + + if _pre_chan: + # con_status += ( + # ^TODO^ swap once we minimize conn duplication + # -[ ] last thing might be reg/unreg runtime reqs? + # log.warning( + log.debug( + f'?Wait?\n' + f'We already have IPC with peer {uid_short!r}\n' + f'|_{_pre_chan}\n' + ) + + # IPC connection tracking for both peers and new children: + # - if this is a new channel to a locally spawned + # sub-actor there will be a spawn wait even registered + # by a call to `.wait_for_peer()`. + # - if a peer is connecting no such event will exit. 
+ event: trio.Event|None = server._peer_connected.pop( + uid, + None, + ) + if event: + con_status += ( + ' -> Waking subactor spawn waiters: ' + f'{event.statistics().tasks_waiting}\n' + f' -> Registered IPC chan for child actor {uid}@{chan.raddr}\n' + # f' {event}\n' + # f' |{event.statistics()}\n' + ) + # wake tasks waiting on this IPC-transport "connect-back" + event.set() + + else: + con_status += ( + f' -> Registered IPC chan for peer actor {uid}@{chan.raddr}\n' + ) # type: ignore + + chans: list[Channel] = server._peers[uid] + # if chans: + # # TODO: re-use channels for new connections instead + # # of always new ones? + # # => will require changing all the discovery funcs.. + + # append new channel + # TODO: can we just use list-ref directly? + chans.append(chan) + + con_status += ' -> Entering RPC msg loop..\n' + log.runtime(con_status) + + # Begin channel management - respond to remote requests and + # process received reponses. + disconnected: bool = False + last_msg: MsgType + try: + ( + disconnected, + last_msg, + ) = await _rpc.process_messages( + chan=chan, + ) + except trio.Cancelled: + log.cancel( + 'IPC transport msg loop was cancelled\n' + f'c)>\n' + f' |_{chan}\n' + ) + raise + + finally: + + # check if there are subs which we should gracefully join at + # both the inter-actor-task and subprocess levels to + # gracefully remote cancel and later disconnect (particularly + # for permitting subs engaged in active debug-REPL sessions). + local_nursery: ActorNursery|None = await maybe_wait_on_canced_subs( + uid=uid, + chan=chan, + disconnected=disconnected, + ) + + # ``Channel`` teardown and closure sequence + # drop ref to channel so it can be gc-ed and disconnected + con_teardown_status: str = ( + f'IPC channel disconnected:\n' + f'<=x uid: {chan.aid}\n' + f' |_{pformat(chan)}\n\n' + ) + chans.remove(chan) + + # TODO: do we need to be this pedantic? + if not chans: + con_teardown_status += ( + f'-> No more channels with {chan.aid}' + ) + server._peers.pop(uid, None) + + peers_str: str = '' + for uid, chans in server._peers.items(): + peers_str += ( + f'uid: {uid}\n' + ) + for i, chan in enumerate(chans): + peers_str += ( + f' |_[{i}] {pformat(chan)}\n' + ) + + con_teardown_status += ( + f'-> Remaining IPC {len(server._peers)} peers: {peers_str}\n' + ) + + # No more channels to other actors (at all) registered + # as connected. + if not server._peers: + con_teardown_status += ( + 'Signalling no more peer channel connections' + ) + server._no_more_peers.set() + + # NOTE: block this actor from acquiring the + # debugger-TTY-lock since we have no way to know if we + # cancelled it and further there is no way to ensure the + # lock will be released if acquired due to having no + # more active IPC channels. + if ( + _state.is_root_process() + and + _state.is_debug_mode() + ): + from ..devx import debug + pdb_lock = debug.Lock + pdb_lock._blocked.add(uid) + + # TODO: NEEEDS TO BE TESTED! + # actually, no idea if this ever even enters.. XD + # + # XXX => YES IT DOES, when i was testing ctl-c + # from broken debug TTY locking due to + # msg-spec races on application using RunVar... 
+ if ( + local_nursery + and + (ctx_in_debug := pdb_lock.ctx_in_debug) + and + (pdb_user_uid := ctx_in_debug.chan.aid) + ): + entry: tuple|None = local_nursery._children.get( + tuple(pdb_user_uid) + ) + if entry: + proc: trio.Process + _, proc, _ = entry + + if ( + (poll := getattr(proc, 'poll', None)) + and poll() is None + ): + log.cancel( + 'Root actor reports no-more-peers, BUT\n' + 'a DISCONNECTED child still has the debug ' + 'lock!\n\n' + # f'root uid: {actor.uid}\n' + f'last disconnected child uid: {uid}\n' + f'locking child uid: {pdb_user_uid}\n' + ) + await debug.maybe_wait_for_debugger( + child_in_debug=True + ) + + # TODO: just bc a child's transport dropped + # doesn't mean it's not still using the pdb + # REPL! so, + # -[ ] ideally we can check out child proc + # tree to ensure that its alive (and + # actually using the REPL) before we cancel + # it's lock acquire by doing the below! + # -[ ] create a way to read the tree of each actor's + # grandchildren such that when an + # intermediary parent is cancelled but their + # child has locked the tty, the grandparent + # will not allow the parent to cancel or + # zombie reap the child! see open issue: + # - https://github.com/goodboy/tractor/issues/320 + # ------ - ------ + # if a now stale local task has the TTY lock still + # we cancel it to allow servicing other requests for + # the lock. + if ( + (db_cs := pdb_lock.get_locking_task_cs()) + and not db_cs.cancel_called + and uid == pdb_user_uid + ): + log.critical( + f'STALE DEBUG LOCK DETECTED FOR {uid}' + ) + # TODO: figure out why this breaks tests.. + db_cs.cancel() + + log.runtime(con_teardown_status) + # finally block closure + + +class Endpoint(Struct): + ''' + An instance of an IPC "bound" address where the lifetime of the + "ability to accept connections" (from clients) and then handle + those inbound sessions or sequences-of-packets is determined by + a (maybe pair of) nurser(y/ies). + + ''' + addr: Address + listen_tn: Nursery + stream_handler_tn: Nursery|None = None + + # NOTE, normally filled in by calling `.start_listener()` + _listener: SocketListener|None = None + + # ?TODO, mk stream_handler hook into this ep instance so that we + # always keep track of all `SocketStream` instances per + # listener/ep? + peer_tpts: dict[ + UDSAddress|TCPAddress, # peer addr + MsgTransport, # handle to encoded-msg transport stream + ] = {} + + async def start_listener(self) -> SocketListener: + tpt_mod: ModuleType = inspect.getmodule(self.addr) + lstnr: SocketListener = await tpt_mod.start_listener( + addr=self.addr, + ) + + # NOTE, for handling the resolved non-0 port for + # TCP/UDP network sockets. + if ( + (unwrapped := lstnr.socket.getsockname()) + != + self.addr.unwrap() + ): + self.addr=self.addr.from_addr(unwrapped) + + self._listener = lstnr + return lstnr + + def close_listener( + self, + ) -> bool: + tpt_mod: ModuleType = inspect.getmodule(self.addr) + closer: Callable = getattr( + tpt_mod, + 'close_listener', + False, + ) + # when no defined closing is implicit! + if not closer: + return True + return closer( + addr=self.addr, + lstnr=self._listener, + ) + + +class Server(Struct): + _parent_tn: Nursery + _stream_handler_tn: Nursery + # level-triggered sig for whether "no peers are currently + # connected"; field is **always** set to an instance but + # initialized with `.is_set() == True`. 
+ _no_more_peers: trio.Event + + _endpoints: list[Endpoint] = [] + + # connection tracking & mgmt + _peers: defaultdict[ + str, # uaid + list[Channel], # IPC conns from peer + ] = defaultdict(list) + _peer_connected: dict[ + tuple[str, str], + trio.Event, + ] = {} + + # syncs for setup/teardown sequences + _shutdown: trio.Event|None = None + + # TODO, maybe just make `._endpoints: list[Endpoint]` and + # provide dict-views onto it? + # @property + # def addrs2eps(self) -> dict[Address, Endpoint]: + # ... + + @property + def proto_keys(self) -> list[str]: + return [ + ep.addr.proto_key + for ep in self._endpoints + ] + + # def cancel_server(self) -> bool: + def cancel( + self, + + # !TODO, suport just shutting down accepting new clients, + # not existing ones! + # only_listeners: str|None = None + + ) -> bool: + ''' + Cancel this IPC transport server nursery thereby + preventing any new inbound IPC connections establishing. + + ''' + if self._parent_tn: + # TODO: obvi a different server type when we eventually + # support some others XD + log.runtime( + f'Cancelling server(s) for\n' + f'{self.proto_keys!r}\n' + ) + self._parent_tn.cancel_scope.cancel() + return True + + log.warning( + 'No IPC server started before cancelling ?' + ) + return False + + async def wait_for_shutdown( + self, + ) -> bool: + if self._shutdown is not None: + await self._shutdown.wait() + else: + tpt_protos: list[str] = [] + ep: Endpoint + for ep in self._endpoints: + tpt_protos.append(ep.addr.proto_key) + + log.warning( + 'Transport server(s) may have been cancelled before started?\n' + f'protos: {tpt_protos!r}\n' + ) + + def has_peers( + self, + check_chans: bool = False, + ) -> bool: + ''' + Predicate for "are there any active peer IPC `Channel`s at the moment?" + + ''' + has_peers: bool = not self._no_more_peers.is_set() + if ( + has_peers + and + check_chans + ): + has_peers: bool = ( + any(chan.connected() + for chan in chain( + *self._peers.values() + ) + ) + and + has_peers + ) + + return has_peers + + async def wait_for_no_more_peers( + self, + shield: bool = False, + ) -> None: + with trio.CancelScope(shield=shield): + await self._no_more_peers.wait() + + async def wait_for_peer( + self, + uid: tuple[str, str], + + ) -> tuple[trio.Event, Channel]: + ''' + Wait for a connection back from a (spawned sub-)actor with + a `uid` using a `trio.Event`. + + Returns a pair of the event and the "last" registered IPC + `Channel` for the peer with `uid`. + + ''' + log.debug(f'Waiting for peer {uid!r} to connect') + event: trio.Event = self._peer_connected.setdefault( + uid, + trio.Event(), + ) + await event.wait() + log.debug(f'{uid!r} successfully connected back to us') + mru_chan: Channel = self._peers[uid][-1] + return ( + event, + mru_chan, + ) + + @property + def addrs(self) -> list[Address]: + return [ep.addr for ep in self._endpoints] + + @property + def accept_addrs(self) -> list[str, str|int]: + ''' + The `list` of `Address.unwrap()`-ed active IPC endpoint addrs. 
+ + ''' + return [ep.addr.unwrap() for ep in self._endpoints] + + def epsdict(self) -> dict[ + Address, + Endpoint, + ]: + return { + ep.addr: ep + for ep in self._endpoints + } + + def is_shutdown(self) -> bool: + if (ev := self._shutdown) is None: + return False + + return ev.is_set() + + def pformat(self) -> str: + eps: list[Endpoint] = self._endpoints + + state_repr: str = ( + f'{len(eps)!r} IPC-endpoints active' + ) + fmtstr = ( + f' |_state: {state_repr}\n' + f' no_more_peers: {self.has_peers()}\n' + ) + if self._shutdown is not None: + shutdown_stats: EventStatistics = self._shutdown.statistics() + fmtstr += ( + f' task_waiting_on_shutdown: {shutdown_stats}\n' + ) + + fmtstr += ( + # TODO, use the `ppfmt()` helper from `modden`! + f' |_endpoints: {pformat(self._endpoints)}\n' + f' |_peers: {len(self._peers)} connected\n' + ) + + return ( + f'\n' + ) + + __repr__ = pformat + + # TODO? maybe allow shutting down a `.listen_on()`s worth of + # listeners by cancelling the corresponding + # `Endpoint._listen_tn` only ? + # -[ ] in theory you could use this to + # "boot-and-wait-for-reconnect" of all current and connecting + # peers? + # |_ would require that the stream-handler is intercepted so we + # can intercept every `MsgTransport` (stream) and track per + # `Endpoint` likely? + # + # async def unlisten( + # self, + # listener: SocketListener, + # ) -> bool: + # ... + + async def listen_on( + self, + *, + accept_addrs: list[tuple[str, int|str]]|None = None, + stream_handler_nursery: Nursery|None = None, + ) -> list[Endpoint]: + ''' + Start `SocketListeners` (i.e. bind and call `socket.listen()`) + for all IPC-transport-protocol specific `Address`-types + in `accept_addrs`. + + ''' + from .._addr import ( + default_lo_addrs, + wrap_address, + ) + if accept_addrs is None: + accept_addrs = default_lo_addrs([ + _state._def_tpt_proto + ]) + + else: + accept_addrs: list[Address] = [ + wrap_address(a) for a in accept_addrs + ] + + if self._shutdown is None: + self._shutdown = trio.Event() + + elif self.is_shutdown(): + raise RuntimeError( + f'IPC server has already terminated ?\n' + f'{self}\n' + ) + + log.runtime( + f'Binding to endpoints for,\n' + f'{accept_addrs}\n' + ) + eps: list[Endpoint] = await self._parent_tn.start( + partial( + _serve_ipc_eps, + server=self, + stream_handler_tn=stream_handler_nursery, + listen_addrs=accept_addrs, + ) + ) + log.runtime( + f'Started IPC endpoints\n' + f'{eps}\n' + ) + + self._endpoints.extend(eps) + # XXX, just a little bit of sanity + group_tn: Nursery|None = None + ep: Endpoint + for ep in eps: + if ep.addr not in self.addrs: + breakpoint() + + if group_tn is None: + group_tn = ep.listen_tn + else: + assert group_tn is ep.listen_tn + + return eps + + +# alias until we decide on final naming +IPCServer = Server + + +async def _serve_ipc_eps( + *, + server: IPCServer, + stream_handler_tn: Nursery, + listen_addrs: list[tuple[str, int|str]], + + task_status: TaskStatus[ + Nursery, + ] = trio.TASK_STATUS_IGNORED, +) -> None: + ''' + Start IPC transport server(s) for the actor, begin + listening/accepting new `trio.SocketStream` connections + from peer actors via a `SocketListener`. + + This will cause an actor to continue living (and thus + blocking at the process/OS-thread level) until + `.cancel_server()` is called. + + ''' + try: + listen_tn: Nursery + async with trio.open_nursery() as listen_tn: + + eps: list[Endpoint] = [] + # XXX NOTE, required to call `serve_listeners()` below. + # ?TODO, maybe just pass `list(eps.values()` tho? 
+ listeners: list[trio.abc.Listener] = [] + for addr in listen_addrs: + ep = Endpoint( + addr=addr, + listen_tn=listen_tn, + stream_handler_tn=stream_handler_tn, + ) + try: + log.runtime( + f'Starting new endpoint listener\n' + f'{ep}\n' + ) + listener: trio.abc.Listener = await ep.start_listener() + assert listener is ep._listener + # actor = _state.current_actor() + # if actor.is_registry: + # import pdbp; pdbp.set_trace() + + except OSError as oserr: + if ( + '[Errno 98] Address already in use' + in + oserr.args#[0] + ): + log.exception( + f'Address already in use?\n' + f'{addr}\n' + ) + raise + + listeners.append(listener) + eps.append(ep) + + _listeners: list[SocketListener] = await listen_tn.start( + partial( + trio.serve_listeners, + handler=partial( + handle_stream_from_peer, + server=server, + ), + listeners=listeners, + + # NOTE: configured such that new + # connections will stay alive even if + # this server is cancelled! + handler_nursery=stream_handler_tn + ) + ) + # TODO, wow make this message better! XD + log.runtime( + 'Started server(s)\n' + + + '\n'.join([f'|_{addr}' for addr in listen_addrs]) + ) + + log.runtime( + f'Started IPC endpoints\n' + f'{eps}\n' + ) + task_status.started( + eps, + ) + + finally: + if eps: + addr: Address + ep: Endpoint + for addr, ep in server.epsdict().items(): + ep.close_listener() + server._endpoints.remove(ep) + + # actor = _state.current_actor() + # if actor.is_arbiter: + # import pdbp; pdbp.set_trace() + + # signal the server is "shutdown"/"terminated" + # since no more active endpoints are active. + if not server._endpoints: + server._shutdown.set() + +@acm +async def open_ipc_server( + parent_tn: Nursery|None = None, + stream_handler_tn: Nursery|None = None, + +) -> IPCServer: + + async with maybe_open_nursery( + nursery=parent_tn, + ) as rent_tn: + no_more_peers = trio.Event() + no_more_peers.set() + + ipc_server = IPCServer( + _parent_tn=rent_tn, + _stream_handler_tn=stream_handler_tn or rent_tn, + _no_more_peers=no_more_peers, + ) + try: + yield ipc_server + log.runtime( + f'Waiting on server to shutdown or be cancelled..\n' + f'{ipc_server}' + ) + # TODO? when if ever would we want/need this? + # with trio.CancelScope(shield=True): + # await ipc_server.wait_for_shutdown() + + except BaseException as berr: + log.exception( + 'IPC server caller crashed ??' + ) + # ?TODO, maybe we can ensure the endpoints are torndown + # (and thus their managed listeners) beforehand to ensure + # super graceful RPC mechanics? + # + # -[ ] but aren't we doing that already per-`listen_tn` + # inside `_serve_ipc_eps()` above? 
+ # + # ipc_server.cancel() + raise berr diff --git a/tractor/_shm.py b/tractor/ipc/_shm.py similarity index 95% rename from tractor/_shm.py rename to tractor/ipc/_shm.py index f8295105..62b26e79 100644 --- a/tractor/_shm.py +++ b/tractor/ipc/_shm.py @@ -32,10 +32,14 @@ from multiprocessing.shared_memory import ( ShareableList, ) -from msgspec import Struct +from msgspec import ( + Struct, + to_builtins +) import tractor -from .log import get_logger +from tractor.ipc._mp_bs import disable_mantracker +from tractor.log import get_logger _USE_POSIX = getattr(shm, '_USE_POSIX', False) @@ -46,7 +50,10 @@ if _USE_POSIX: try: import numpy as np from numpy.lib import recfunctions as rfn - import nptyping + # TODO ruff complains with, + # warning| F401: `nptyping` imported but unused; consider using + # `importlib.util.find_spec` to test for availability + import nptyping # noqa except ImportError: pass @@ -54,34 +61,6 @@ except ImportError: log = get_logger(__name__) -def disable_mantracker(): - ''' - Disable all ``multiprocessing``` "resource tracking" machinery since - it's an absolute multi-threaded mess of non-SC madness. - - ''' - from multiprocessing import resource_tracker as mantracker - - # Tell the "resource tracker" thing to fuck off. - class ManTracker(mantracker.ResourceTracker): - def register(self, name, rtype): - pass - - def unregister(self, name, rtype): - pass - - def ensure_running(self): - pass - - # "know your land and know your prey" - # https://www.dailymotion.com/video/x6ozzco - mantracker._resource_tracker = ManTracker() - mantracker.register = mantracker._resource_tracker.register - mantracker.ensure_running = mantracker._resource_tracker.ensure_running - mantracker.unregister = mantracker._resource_tracker.unregister - mantracker.getfd = mantracker._resource_tracker.getfd - - disable_mantracker() @@ -142,7 +121,7 @@ class NDToken(Struct, frozen=True): ).descr def as_msg(self): - return self.to_dict() + return to_builtins(self) @classmethod def from_msg(cls, msg: dict) -> NDToken: diff --git a/tractor/ipc/_tcp.py b/tractor/ipc/_tcp.py new file mode 100644 index 00000000..e945cdfb --- /dev/null +++ b/tractor/ipc/_tcp.py @@ -0,0 +1,256 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
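# A hedged usage sketch for the `Server`/`open_ipc_server()` APIs from
# `_server.py` above (illustrative only, not part of this patch): boot the
# IPC server under a plain `trio` nursery and bind a single loopback TCP
# endpoint. It assumes `tractor._addr.wrap_address()` accepts a
# `(host, port)` tuple as implied by `Server.listen_on()`; the address and
# sleep below are illustrative.
import trio

from tractor.ipc._server import open_ipc_server


async def serve_once() -> None:
    async with (
        trio.open_nursery() as tn,
        open_ipc_server(
            parent_tn=tn,
            stream_handler_tn=tn,
        ) as server,
    ):
        await server.listen_on(
            accept_addrs=[('127.0.0.1', 0)],  # 0 => OS-assigned port
        )
        print(f'serving on {server.accept_addrs}')

        # new peer `Channel`s are tracked in `server._peers` as
        # `handle_stream_from_peer()` accepts inbound connections..
        await trio.sleep(1)

        # tear down all listeners (cancels the parent nursery's scope).
        server.cancel()


trio.run(serve_once)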
+''' +TCP implementation of tractor.ipc._transport.MsgTransport protocol + +''' +from __future__ import annotations +import ipaddress +from typing import ( + ClassVar, +) +# from contextlib import ( +# asynccontextmanager as acm, +# ) + +import msgspec +import trio +from trio import ( + SocketListener, + open_tcp_listeners, +) + +from tractor.msg import MsgCodec +from tractor.log import get_logger +from tractor.ipc._transport import ( + MsgTransport, + MsgpackTransport, +) + + +log = get_logger(__name__) + + +class TCPAddress( + msgspec.Struct, + frozen=True, +): + _host: str + _port: int + + def __post_init__(self): + try: + ipaddress.ip_address(self._host) + except ValueError as valerr: + raise ValueError( + 'Invalid {type(self).__name__}._host = {self._host!r}\n' + ) from valerr + + proto_key: ClassVar[str] = 'tcp' + unwrapped_type: ClassVar[type] = tuple[str, int] + def_bindspace: ClassVar[str] = '127.0.0.1' + + # ?TODO, actually validate ipv4/6 with stdlib's `ipaddress` + @property + def is_valid(self) -> bool: + ''' + Predicate to ensure a valid socket-address pair. + + ''' + return ( + self._port != 0 + and + (ipaddr := ipaddress.ip_address(self._host)) + and not ( + ipaddr.is_reserved + or + ipaddr.is_unspecified + or + ipaddr.is_link_local + or + ipaddr.is_link_local + or + ipaddr.is_multicast + or + ipaddr.is_global + ) + ) + # ^XXX^ see various properties of invalid addrs here, + # https://docs.python.org/3/library/ipaddress.html#ipaddress.IPv4Address + + @property + def bindspace(self) -> str: + return self._host + + @property + def domain(self) -> str: + return self._host + + @classmethod + def from_addr( + cls, + addr: tuple[str, int] + ) -> TCPAddress: + match addr: + case (str(), int()): + return TCPAddress(addr[0], addr[1]) + case _: + raise ValueError( + f'Invalid unwrapped address for {cls}\n' + f'{addr}\n' + ) + + def unwrap(self) -> tuple[str, int]: + return ( + self._host, + self._port, + ) + + @classmethod + def get_random( + cls, + bindspace: str = def_bindspace, + ) -> TCPAddress: + return TCPAddress(bindspace, 0) + + @classmethod + def get_root(cls) -> TCPAddress: + return TCPAddress( + '127.0.0.1', + 1616, + ) + + def __repr__(self) -> str: + return ( + f'{type(self).__name__}[{self.unwrap()}]' + ) + + @classmethod + def get_transport( + cls, + codec: str = 'msgpack', + ) -> MsgTransport: + match codec: + case 'msgspack': + return MsgpackTCPStream + case _: + raise ValueError( + f'No IPC transport with {codec!r} supported !' + ) + + +async def start_listener( + addr: TCPAddress, + **kwargs, +) -> SocketListener: + ''' + Start a TCP socket listener on the given `TCPAddress`. + + ''' + log.info( + f'Attempting to bind TCP socket\n' + f'>[\n' + f'|_{addr}\n' + ) + # ?TODO, maybe we should just change the lower-level call this is + # using internall per-listener? + listeners: list[SocketListener] = await open_tcp_listeners( + host=addr._host, + port=addr._port, + **kwargs + ) + # NOTE, for now we don't expect non-singleton-resolving + # domain-addresses/multi-homed-hosts. + # (though it is supported by `open_tcp_listeners()`) + assert len(listeners) == 1 + listener = listeners[0] + host, port = listener.socket.getsockname()[:2] + + log.info( + f'Listening on TCP socket\n' + f'[>\n' + f' |_{addr}\n' + ) + return listener + + +# TODO: typing oddity.. not sure why we have to inherit here, but it +# seems to be an issue with `get_msg_transport()` returning +# a `Type[Protocol]`; probably should make a `mypy` issue? 
+class MsgpackTCPStream(MsgpackTransport): + ''' + A ``trio.SocketStream`` delivering ``msgpack`` formatted data + using the ``msgspec`` codec lib. + + ''' + address_type = TCPAddress + layer_key: int = 4 + + @property + def maddr(self) -> str: + host, port = self.raddr.unwrap() + return ( + # TODO, use `ipaddress` from stdlib to handle + # first detecting which of `ipv4/6` before + # choosing the routing prefix part. + f'/ipv4/{host}' + + f'/{self.address_type.proto_key}/{port}' + # f'/{self.chan.uid[0]}' + # f'/{self.cid}' + + # f'/cid={cid_head}..{cid_tail}' + # TODO: ? not use this ^ right ? + ) + + def connected(self) -> bool: + return self.stream.socket.fileno() != -1 + + @classmethod + async def connect_to( + cls, + destaddr: TCPAddress, + prefix_size: int = 4, + codec: MsgCodec|None = None, + **kwargs + ) -> MsgpackTCPStream: + stream = await trio.open_tcp_stream( + *destaddr.unwrap(), + **kwargs + ) + return MsgpackTCPStream( + stream, + prefix_size=prefix_size, + codec=codec + ) + + @classmethod + def get_stream_addrs( + cls, + stream: trio.SocketStream + ) -> tuple[ + TCPAddress, + TCPAddress, + ]: + # TODO, what types are these? + lsockname = stream.socket.getsockname() + l_sockaddr: tuple[str, int] = tuple(lsockname[:2]) + rsockname = stream.socket.getpeername() + r_sockaddr: tuple[str, int] = tuple(rsockname[:2]) + return ( + TCPAddress.from_addr(l_sockaddr), + TCPAddress.from_addr(r_sockaddr), + ) diff --git a/tractor/ipc/_transport.py b/tractor/ipc/_transport.py new file mode 100644 index 00000000..6bfa5f6a --- /dev/null +++ b/tractor/ipc/_transport.py @@ -0,0 +1,514 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +''' +typing.Protocol based generic msg API, implement this class to add +backends for tractor.ipc.Channel + +''' +from __future__ import annotations +from typing import ( + runtime_checkable, + Type, + Protocol, + # TypeVar, + ClassVar, + TYPE_CHECKING, +) +from collections.abc import ( + AsyncGenerator, + AsyncIterator, +) +import struct + +import trio +import msgspec +from tricycle import BufferedReceiveStream + +from tractor.log import get_logger +from tractor._exceptions import ( + MsgTypeError, + TransportClosed, + _mk_send_mte, + _mk_recv_mte, +) +from tractor.msg import ( + _ctxvar_MsgCodec, + # _codec, XXX see `self._codec` sanity/debug checks + MsgCodec, + MsgType, + types as msgtypes, + pretty_struct, +) + +if TYPE_CHECKING: + from tractor._addr import Address + +log = get_logger(__name__) + + +# (codec, transport) +MsgTransportKey = tuple[str, str] + + +# from tractor.msg.types import MsgType +# ?TODO? this should be our `Union[*msgtypes.__spec__]` alias now right..? +# => BLEH, except can't bc prots must inherit typevar or param-spec +# vars.. 
+# MsgType = TypeVar('MsgType') + + +@runtime_checkable +class MsgTransport(Protocol): +# +# class MsgTransport(Protocol[MsgType]): +# ^-TODO-^ consider using a generic def and indexing with our +# eventual msg definition/types? +# - https://docs.python.org/3/library/typing.html#typing.Protocol + + stream: trio.SocketStream + drained: list[MsgType] + + address_type: ClassVar[Type[Address]] + codec_key: ClassVar[str] + + # XXX: should this instead be called `.sendall()`? + async def send(self, msg: MsgType) -> None: + ... + + async def recv(self) -> MsgType: + ... + + def __aiter__(self) -> MsgType: + ... + + def connected(self) -> bool: + ... + + # defining this sync otherwise it causes a mypy error because it + # can't figure out it's a generator i guess?..? + def drain(self) -> AsyncIterator[dict]: + ... + + @classmethod + def key(cls) -> MsgTransportKey: + return ( + cls.codec_key, + cls.address_type.proto_key, + ) + + @property + def laddr(self) -> Address: + ... + + @property + def raddr(self) -> Address: + ... + + @property + def maddr(self) -> str: + ... + + @classmethod + async def connect_to( + cls, + addr: Address, + **kwargs + ) -> MsgTransport: + ... + + @classmethod + def get_stream_addrs( + cls, + stream: trio.abc.Stream + ) -> tuple[ + Address, # local + Address # remote + ]: + ''' + Return the transport protocol's address pair for the local + and remote-peer side. + + ''' + ... + + # TODO, such that all `.raddr`s for each `SocketStream` are + # delivered? + # -[ ] move `.open_listener()` here and internally track the + # listener set, per address? + # def get_peers( + # self, + # ) -> list[Address]: + # ... + + + +class MsgpackTransport(MsgTransport): + + # TODO: better naming for this? + # -[ ] check how libp2p does naming for such things? + codec_key: str = 'msgpack' + + def __init__( + self, + stream: trio.abc.Stream, + prefix_size: int = 4, + + # XXX optionally provided codec pair for `msgspec`: + # https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types + # + # TODO: define this as a `Codec` struct which can be + # overriden dynamically by the application/runtime? + codec: MsgCodec = None, + + ) -> None: + self.stream = stream + ( + self._laddr, + self._raddr, + ) = self.get_stream_addrs(stream) + + # create read loop instance + self._aiter_pkts = self._iter_packets() + self._send_lock = trio.StrictFIFOLock() + + # public i guess? + self.drained: list[dict] = [] + + self.recv_stream = BufferedReceiveStream( + transport_stream=stream + ) + self.prefix_size = prefix_size + + # allow for custom IPC msg interchange format + # dynamic override Bo + self._task = trio.lowlevel.current_task() + + # XXX for ctxvar debug only! + # self._codec: MsgCodec = ( + # codec + # or + # _codec._ctxvar_MsgCodec.get() + # ) + + async def _iter_packets(self) -> AsyncGenerator[dict, None]: + ''' + Yield `bytes`-blob decoded packets from the underlying TCP + stream using the current task's `MsgCodec`. + + This is a streaming routine implemented as an async generator + func (which was the original design, but could be changed?) + and is allocated by a `.__call__()` inside `.__init__()` where + it is assigned to the `._aiter_pkts` attr. 
+ + ''' + decodes_failed: int = 0 + + tpt_name: str = f'{type(self).__name__!r}' + while True: + try: + header: bytes = await self.recv_stream.receive_exactly(4) + except ( + ValueError, + ConnectionResetError, + + # not sure entirely why we need this but without it we + # seem to be getting racy failures here on + # arbiter/registry name subs.. + trio.BrokenResourceError, + + ) as trans_err: + + loglevel = 'transport' + match trans_err: + # case ( + # ConnectionResetError() + # ): + # loglevel = 'transport' + + # peer actor (graceful??) TCP EOF but `tricycle` + # seems to raise a 0-bytes-read? + case ValueError() if ( + 'unclean EOF' in trans_err.args[0] + ): + pass + + # peer actor (task) prolly shutdown quickly due + # to cancellation + case trio.BrokenResourceError() if ( + 'Connection reset by peer' in trans_err.args[0] + ): + pass + + # unless the disconnect condition falls under "a + # normal operation breakage" we usualy console warn + # about it. + case _: + loglevel: str = 'warning' + + + raise TransportClosed( + message=( + f'{tpt_name} already closed by peer\n' + ), + src_exc=trans_err, + loglevel=loglevel, + ) from trans_err + + # XXX definitely can happen if transport is closed + # manually by another `trio.lowlevel.Task` in the + # same actor; we use this in some simulated fault + # testing for ex, but generally should never happen + # under normal operation! + # + # NOTE: as such we always re-raise this error from the + # RPC msg loop! + except trio.ClosedResourceError as cre: + closure_err = cre + + raise TransportClosed( + message=( + f'{tpt_name} was already closed locally ?\n' + ), + src_exc=closure_err, + loglevel='error', + raise_on_report=( + 'another task closed this fd' in closure_err.args + ), + ) from closure_err + + # graceful TCP EOF disconnect + if header == b'': + raise TransportClosed( + message=( + f'{tpt_name} already gracefully closed\n' + ), + loglevel='transport', + ) + + size: int + size, = struct.unpack(" None: + ''' + Send a msgpack encoded py-object-blob-as-msg over TCP. + + If `strict_types == True` then a `MsgTypeError` will be raised on any + invalid msg type + + ''' + __tracebackhide__: bool = hide_tb + + # XXX see `trio._sync.AsyncContextManagerMixin` for details + # on the `.acquire()`/`.release()` sequencing.. + async with self._send_lock: + + # NOTE: lookup the `trio.Task.context`'s var for + # the current `MsgCodec`. + codec: MsgCodec = _ctxvar_MsgCodec.get() + + # XXX for ctxvar debug only! 
+ # if self._codec.pld_spec != codec.pld_spec: + # self._codec = codec + # log.runtime( + # f'Using new codec in {self}.send()\n' + # f'codec: {self._codec}\n\n' + # f'msg: {msg}\n' + # ) + + if type(msg) not in msgtypes.__msg_types__: + if strict_types: + raise _mk_send_mte( + msg, + codec=codec, + ) + else: + log.warning( + 'Sending non-`Msg`-spec msg?\n\n' + f'{msg}\n' + ) + + try: + bytes_data: bytes = codec.encode(msg) + except TypeError as _err: + typerr = _err + msgtyperr: MsgTypeError = _mk_send_mte( + msg, + codec=codec, + message=( + f'IPC-msg-spec violation in\n\n' + f'{pretty_struct.Struct.pformat(msg)}' + ), + src_type_error=typerr, + ) + raise msgtyperr from typerr + + # supposedly the fastest says, + # https://stackoverflow.com/a/54027962 + size: bytes = struct.pack(" + # except BaseException as _err: + # err = _err + # if not isinstance(err, MsgTypeError): + # __tracebackhide__: bool = False + # raise + + async def recv(self) -> msgtypes.MsgType: + return await self._aiter_pkts.asend(None) + + async def drain(self) -> AsyncIterator[dict]: + ''' + Drain the stream's remaining messages sent from + the far end until the connection is closed by + the peer. + + ''' + try: + async for msg in self._iter_packets(): + self.drained.append(msg) + except TransportClosed: + for msg in self.drained: + yield msg + + def __aiter__(self): + return self._aiter_pkts + + @property + def laddr(self) -> Address: + return self._laddr + + @property + def raddr(self) -> Address: + return self._raddr + + def pformat(self) -> str: + return ( + f'<{type(self).__name__}(\n' + f' |_peers: 2\n' + f' laddr: {self._laddr}\n' + f' raddr: {self._raddr}\n' + # f'\n' + f' |_task: {self._task}\n' + f')>\n' + ) + + __repr__ = __str__ = pformat diff --git a/tractor/ipc/_types.py b/tractor/ipc/_types.py new file mode 100644 index 00000000..59653b17 --- /dev/null +++ b/tractor/ipc/_types.py @@ -0,0 +1,123 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +''' +IPC subsys type-lookup helpers? 
+ +''' +from typing import ( + Type, + # TYPE_CHECKING, +) + +import trio +import socket + +from tractor.ipc._transport import ( + MsgTransportKey, + MsgTransport +) +from tractor.ipc._tcp import ( + TCPAddress, + MsgpackTCPStream, +) +from tractor.ipc._uds import ( + UDSAddress, + MsgpackUDSStream, +) + +# if TYPE_CHECKING: +# from tractor._addr import Address + + +Address = TCPAddress|UDSAddress + +# manually updated list of all supported msg transport types +_msg_transports = [ + MsgpackTCPStream, + MsgpackUDSStream +] + + +# convert a MsgTransportKey to the corresponding transport type +_key_to_transport: dict[ + MsgTransportKey, + Type[MsgTransport], +] = { + ('msgpack', 'tcp'): MsgpackTCPStream, + ('msgpack', 'uds'): MsgpackUDSStream, +} + +# convert an Address wrapper to its corresponding transport type +_addr_to_transport: dict[ + Type[TCPAddress|UDSAddress], + Type[MsgTransport] +] = { + TCPAddress: MsgpackTCPStream, + UDSAddress: MsgpackUDSStream, +} + + +def transport_from_addr( + addr: Address, + codec_key: str = 'msgpack', +) -> Type[MsgTransport]: + ''' + Given a destination address and a desired codec, find the + corresponding `MsgTransport` type. + + ''' + try: + return _addr_to_transport[type(addr)] + + except KeyError: + raise NotImplementedError( + f'No known transport for address {repr(addr)}' + ) + + +def transport_from_stream( + stream: trio.abc.Stream, + codec_key: str = 'msgpack' +) -> Type[MsgTransport]: + ''' + Given an arbitrary `trio.abc.Stream` and a desired codec, + find the corresponding `MsgTransport` type. + + ''' + transport = None + if isinstance(stream, trio.SocketStream): + sock: socket.socket = stream.socket + match sock.family: + case socket.AF_INET | socket.AF_INET6: + transport = 'tcp' + + case socket.AF_UNIX: + transport = 'uds' + + case _: + raise NotImplementedError( + f'Unsupported socket family: {sock.family}' + ) + + if not transport: + raise NotImplementedError( + f'Could not figure out transport type for stream type {type(stream)}' + ) + + key = (codec_key, transport) + + return _key_to_transport[key] diff --git a/tractor/ipc/_uds.py b/tractor/ipc/_uds.py new file mode 100644 index 00000000..604802f3 --- /dev/null +++ b/tractor/ipc/_uds.py @@ -0,0 +1,422 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
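# A small, hedged sketch for the `_types.py` lookup helpers above
# (illustrative only, not part of this patch): map an `Address` or a live
# `trio.SocketStream` to its concrete `MsgTransport` implementation.
import trio

from tractor.ipc._tcp import TCPAddress
from tractor.ipc._types import (
    transport_from_addr,
    transport_from_stream,
)

# address -> transport type, e.g. for an outbound connect
addr = TCPAddress('127.0.0.1', 1616)
tpt_cls = transport_from_addr(addr)
assert tpt_cls.key() == ('msgpack', 'tcp')


# stream -> transport type, e.g. for an accepted connection;
# `stream` is assumed to be an already-connected `trio.SocketStream`.
async def wrap_accepted(stream: trio.SocketStream):
    tpt_cls = transport_from_stream(stream)
    return tpt_cls(stream)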
+''' +Unix Domain Socket implementation of tractor.ipc._transport.MsgTransport protocol + +''' +from __future__ import annotations +from pathlib import Path +import os +from socket import ( + AF_UNIX, + SOCK_STREAM, + SO_PASSCRED, + SO_PEERCRED, + SOL_SOCKET, +) +import struct +from typing import ( + TYPE_CHECKING, + ClassVar, +) + +import msgspec +import trio +from trio import ( + socket, + SocketListener, +) +from trio._highlevel_open_unix_stream import ( + close_on_error, + has_unix, +) + +from tractor.msg import MsgCodec +from tractor.log import get_logger +from tractor.ipc._transport import ( + MsgpackTransport, +) +from .._state import ( + get_rt_dir, + current_actor, + is_root_process, +) + +if TYPE_CHECKING: + from ._runtime import Actor + + +log = get_logger(__name__) + + +def unwrap_sockpath( + sockpath: Path, +) -> tuple[Path, Path]: + return ( + sockpath.parent, + sockpath.name, + ) + + +class UDSAddress( + msgspec.Struct, + frozen=True, +): + filedir: str|Path|None + filename: str|Path + maybe_pid: int|None = None + + # TODO, maybe we should use better field and value + # -[x] really this is a `.protocol_key` not a "name" of anything. + # -[ ] consider a 'unix' proto-key instead? + # -[ ] need to check what other mult-transport frameworks do + # like zmq, nng, uri-spec et al! + proto_key: ClassVar[str] = 'uds' + unwrapped_type: ClassVar[type] = tuple[str, int] + def_bindspace: ClassVar[Path] = get_rt_dir() + + @property + def bindspace(self) -> Path: + ''' + We replicate the "ip-set-of-hosts" part of a UDS socket as + just the sub-directory in which we allocate socket files. + + ''' + return ( + self.filedir + or + self.def_bindspace + # or + # get_rt_dir() + ) + + @property + def sockpath(self) -> Path: + return self.bindspace / self.filename + + @property + def is_valid(self) -> bool: + ''' + We block socket files not allocated under the runtime subdir. + + ''' + return self.bindspace in self.sockpath.parents + + @classmethod + def from_addr( + cls, + addr: ( + tuple[Path|str, Path|str]|Path|str + ), + ) -> UDSAddress: + match addr: + case tuple()|list(): + filedir = Path(addr[0]) + filename = Path(addr[1]) + return UDSAddress( + filedir=filedir, + filename=filename, + # maybe_pid=pid, + ) + # NOTE, in case we ever decide to just `.unwrap()` + # to a `Path|str`? 
+ case str()|Path(): + sockpath: Path = Path(addr) + return UDSAddress(*unwrap_sockpath(sockpath)) + case _: + # import pdbp; pdbp.set_trace() + raise TypeError( + f'Bad unwrapped-address for {cls} !\n' + f'{addr!r}\n' + ) + + def unwrap(self) -> tuple[str, int]: + # XXX NOTE, since this gets passed DIRECTLY to + # `.ipc._uds.open_unix_socket_w_passcred()` + return ( + str(self.filedir), + str(self.filename), + ) + + @classmethod + def get_random( + cls, + bindspace: Path|None = None, # default netns + ) -> UDSAddress: + + filedir: Path = bindspace or cls.def_bindspace + pid: int = os.getpid() + actor: Actor|None = current_actor( + err_on_no_runtime=False, + ) + if actor: + sockname: str = '::'.join(actor.uid) + f'@{pid}' + else: + prefix: str = '' + if is_root_process(): + prefix: str = 'root' + sockname: str = f'{prefix}@{pid}' + + sockpath: Path = Path(f'{sockname}.sock') + return UDSAddress( + filedir=filedir, + filename=sockpath, + maybe_pid=pid, + ) + + @classmethod + def get_root(cls) -> UDSAddress: + def_uds_filename: Path = 'registry@1616.sock' + return UDSAddress( + filedir=cls.def_bindspace, + filename=def_uds_filename, + # maybe_pid=1616, + ) + + # ?TODO, maybe we should just our .msg.pretty_struct.Struct` for + # this instead? + # -[ ] is it too "multi-line"y tho? + # the compact tuple/.unwrapped() form is simple enough? + # + def __repr__(self) -> str: + if not (pid := self.maybe_pid): + pid: str = '' + + body: str = ( + f'({self.filedir}, {self.filename}, {pid})' + ) + return ( + f'{type(self).__name__}' + f'[' + f'{body}' + f']' + ) + + +async def start_listener( + addr: UDSAddress, + **kwargs, +) -> SocketListener: + # sock = addr._sock = socket.socket( + sock = socket.socket( + socket.AF_UNIX, + socket.SOCK_STREAM + ) + log.info( + f'Attempting to bind UDS socket\n' + f'>[\n' + f'|_{addr}\n' + ) + + bindpath: Path = addr.sockpath + try: + await sock.bind(str(bindpath)) + except ( + FileNotFoundError, + ) as fdne: + raise ConnectionError( + f'Bad UDS socket-filepath-as-address ??\n' + f'{addr}\n' + f' |_sockpath: {addr.sockpath}\n' + ) from fdne + + sock.listen(1) + log.info( + f'Listening on UDS socket\n' + f'[>\n' + f' |_{addr}\n' + ) + return SocketListener(sock) + + +def close_listener( + addr: UDSAddress, + lstnr: SocketListener, +) -> None: + ''' + Close and remove the listening unix socket's path. + + ''' + lstnr.socket.close() + os.unlink(addr.sockpath) + + +async def open_unix_socket_w_passcred( + filename: str|bytes|os.PathLike[str]|os.PathLike[bytes], +) -> trio.SocketStream: + ''' + Literally the exact same as `trio.open_unix_socket()` except we set the additiona + `socket.SO_PASSCRED` option to ensure the server side (the process calling `accept()`) + can extract the connecting peer's credentials, namely OS specific process + related IDs. + + See this SO for "why" the extra opts, + - https://stackoverflow.com/a/7982749 + + ''' + if not has_unix: + raise RuntimeError("Unix sockets are not supported on this platform") + + # much more simplified logic vs tcp sockets - one socket type and only one + # possible location to connect to + sock = trio.socket.socket(AF_UNIX, SOCK_STREAM) + sock.setsockopt(SOL_SOCKET, SO_PASSCRED, 1) + with close_on_error(sock): + await sock.connect(os.fspath(filename)) + + return trio.SocketStream(sock) + + +def get_peer_info(sock: trio.socket.socket) -> tuple[ + int, # pid + int, # uid + int, # guid +]: + ''' + Deliver the connecting peer's "credentials"-info as defined in + a very Linux specific way.. 
+ + For more deats see, + - `man accept`, + - `man unix`, + + this great online guide to all things sockets, + - https://beej.us/guide/bgnet/html/split-wide/man-pages.html#setsockoptman + + AND this **wonderful SO answer** + - https://stackoverflow.com/a/7982749 + + ''' + creds: bytes = sock.getsockopt( + SOL_SOCKET, + SO_PEERCRED, + struct.calcsize('3i') + ) + # i.e a tuple of the fields, + # pid: int, "process" + # uid: int, "user" + # gid: int, "group" + return struct.unpack('3i', creds) + + +class MsgpackUDSStream(MsgpackTransport): + ''' + A `trio.SocketStream` around a Unix-Domain-Socket transport + delivering `msgpack` encoded msgs using the `msgspec` codec lib. + + ''' + address_type = UDSAddress + layer_key: int = 4 + + @property + def maddr(self) -> str: + if not self.raddr: + return '' + + filepath: Path = Path(self.raddr.unwrap()[0]) + return ( + f'/{self.address_type.proto_key}/{filepath}' + # f'/{self.chan.uid[0]}' + # f'/{self.cid}' + + # f'/cid={cid_head}..{cid_tail}' + # TODO: ? not use this ^ right ? + ) + + def connected(self) -> bool: + return self.stream.socket.fileno() != -1 + + @classmethod + async def connect_to( + cls, + addr: UDSAddress, + prefix_size: int = 4, + codec: MsgCodec|None = None, + **kwargs + ) -> MsgpackUDSStream: + + + sockpath: Path = addr.sockpath + # + # ^XXX NOTE, we don't provide any out-of-band `.pid` info + # (like, over the socket as extra msgs) since the (augmented) + # `.setsockopt()` call tells the OS provide it; the client + # pid can then be read on server/listen() side via + # `get_peer_info()` above. + try: + stream = await open_unix_socket_w_passcred( + str(sockpath), + **kwargs + ) + except ( + FileNotFoundError, + ) as fdne: + raise ConnectionError( + f'Bad UDS socket-filepath-as-address ??\n' + f'{addr}\n' + f' |_sockpath: {sockpath}\n' + ) from fdne + + stream = MsgpackUDSStream( + stream, + prefix_size=prefix_size, + codec=codec + ) + stream._raddr = addr + return stream + + @classmethod + def get_stream_addrs( + cls, + stream: trio.SocketStream + ) -> tuple[ + Path, + int, + ]: + sock: trio.socket.socket = stream.socket + + # NOTE XXX, it's unclear why one or the other ends up being + # `bytes` versus the socket-file-path, i presume it's + # something to do with who is the server (called `.listen()`)? + # maybe could be better implemented using another info-query + # on the socket like, + # https://beej.us/guide/bgnet/html/split-wide/system-calls-or-bust.html#gethostnamewho-am-i + sockname: str|bytes = sock.getsockname() + # https://beej.us/guide/bgnet/html/split-wide/system-calls-or-bust.html#getpeernamewho-are-you + peername: str|bytes = sock.getpeername() + match (peername, sockname): + case (str(), bytes()): + sock_path: Path = Path(peername) + case (bytes(), str()): + sock_path: Path = Path(sockname) + ( + peer_pid, + _, + _, + ) = get_peer_info(sock) + + filedir, filename = unwrap_sockpath(sock_path) + laddr = UDSAddress( + filedir=filedir, + filename=filename, + maybe_pid=os.getpid(), + ) + raddr = UDSAddress( + filedir=filedir, + filename=filename, + maybe_pid=peer_pid + ) + return (laddr, raddr) diff --git a/tractor/log.py b/tractor/log.py index 74e0321b..393c9571 100644 --- a/tractor/log.py +++ b/tractor/log.py @@ -92,7 +92,7 @@ class StackLevelAdapter(LoggerAdapter): ) -> None: ''' IPC transport level msg IO; generally anything below - `._ipc.Channel` and friends. + `.ipc.Channel` and friends. 
''' return self.log(5, msg) @@ -270,7 +270,9 @@ def get_logger( subsys_spec: str|None = None, ) -> StackLevelAdapter: - '''Return the package log or a sub-logger for ``name`` if provided. + ''' + Return the `tractor`-library root logger or a sub-logger for + `name` if provided. ''' log: Logger @@ -282,10 +284,10 @@ def get_logger( name != _proj_name ): - # NOTE: for handling for modules that use ``get_logger(__name__)`` + # NOTE: for handling for modules that use `get_logger(__name__)` # we make the following stylistic choice: # - always avoid duplicate project-package token - # in msg output: i.e. tractor.tractor _ipc.py in header + # in msg output: i.e. tractor.tractor.ipc._chan.py in header # looks ridiculous XD # - never show the leaf module name in the {name} part # since in python the {filename} is always this same @@ -331,7 +333,7 @@ def get_logger( def get_console_log( level: str|None = None, - logger: Logger|None = None, + logger: Logger|StackLevelAdapter|None = None, **kwargs, ) -> LoggerAdapter: @@ -344,12 +346,23 @@ def get_console_log( Yeah yeah, i know we can use `logging.config.dictConfig()`. You do it. ''' - log = get_logger( - logger=logger, - **kwargs - ) # set a root logger - logger: Logger = log.logger + # get/create a stack-aware-adapter + if ( + logger + and + isinstance(logger, StackLevelAdapter) + ): + # XXX, for ex. when passed in by a caller wrapping some + # other lib's logger instance with our level-adapter. + log = logger + else: + log: StackLevelAdapter = get_logger( + logger=logger, + **kwargs + ) + + logger: Logger|StackLevelAdapter = log.logger if not level: return log @@ -367,10 +380,7 @@ def get_console_log( None, ) ): - fmt = LOG_FORMAT - # if logger: - # fmt = None - + fmt: str = LOG_FORMAT # always apply our format? handler = StreamHandler() formatter = colorlog.ColoredFormatter( fmt=fmt, diff --git a/tractor/msg/_ops.py b/tractor/msg/_ops.py index fbbbecff..9a9c9914 100644 --- a/tractor/msg/_ops.py +++ b/tractor/msg/_ops.py @@ -608,7 +608,7 @@ async def drain_to_final_msg( # # -[ ] make sure pause points work here for REPLing # the runtime itself; i.e. ensure there's no hangs! - # |_from tractor.devx._debug import pause + # |_from tractor.devx.debug import pause # await pause() # NOTE: we get here if the far end was diff --git a/tractor/msg/types.py b/tractor/msg/types.py index 1cc8b78e..aaf8d137 100644 --- a/tractor/msg/types.py +++ b/tractor/msg/types.py @@ -31,6 +31,7 @@ from typing import ( Type, TypeVar, TypeAlias, + # TYPE_CHECKING, Union, ) @@ -47,6 +48,7 @@ from tractor.msg import ( pretty_struct, ) from tractor.log import get_logger +# from tractor._addr import UnwrappedAddress log = get_logger('tractor.msgspec') @@ -141,9 +143,16 @@ class Aid( ''' name: str uuid: str - # TODO: use built-in support for UUIDs? - # -[ ] `uuid.UUID` which has multi-protocol support - # https://jcristharif.com/msgspec/supported-types.html#uuid + pid: int|None = None + + # TODO? can/should we extend this field set? + # -[ ] use built-in support for UUIDs? `uuid.UUID` which has + # multi-protocol support + # https://jcristharif.com/msgspec/supported-types.html#uuid + # + # -[ ] as per the `.ipc._uds` / `._addr` comments, maybe we + # should also include at least `.pid` (equiv to port for tcp) + # and/or host-part always? class SpawnSpec( @@ -161,14 +170,15 @@ class SpawnSpec( # a hard `Struct` def for all of these fields! 
_parent_main_data: dict _runtime_vars: dict[str, Any] + # ^NOTE see `._state._runtime_vars: dict` # module import capability enable_modules: dict[str, str] # TODO: not just sockaddr pairs? # -[ ] abstract into a `TransportAddr` type? - reg_addrs: list[tuple[str, int]] - bind_addrs: list[tuple[str, int]] + reg_addrs: list[tuple[str, str|int]] + bind_addrs: list[tuple[str, str|int]]|None # TODO: caps based RPC support in the payload? diff --git a/tractor/to_asyncio.py b/tractor/to_asyncio.py index 08b1ed25..04635c5b 100644 --- a/tractor/to_asyncio.py +++ b/tractor/to_asyncio.py @@ -49,7 +49,7 @@ from tractor._state import ( _runtime_vars, ) from tractor._context import Unresolved -from tractor.devx import _debug +from tractor.devx import debug from tractor.log import ( get_logger, StackLevelAdapter, @@ -479,12 +479,12 @@ def _run_asyncio_task( if ( debug_mode() and - (greenback := _debug.maybe_import_greenback( + (greenback := debug.maybe_import_greenback( force_reload=True, raise_not_found=False, )) ): - log.info( + log.devx( f'Bestowing `greenback` portal for `asyncio`-task\n' f'{task}\n' ) @@ -841,7 +841,7 @@ async def translate_aio_errors( except BaseException as _trio_err: trio_err = chan._trio_err = _trio_err # await tractor.pause(shield=True) # workx! - entered: bool = await _debug._maybe_enter_pm( + entered: bool = await debug._maybe_enter_pm( trio_err, api_frame=inspect.currentframe(), ) @@ -1406,7 +1406,7 @@ def run_as_asyncio_guest( ) # XXX make it obvi we know this isn't supported yet! assert 0 - # await _debug.maybe_init_greenback( + # await debug.maybe_init_greenback( # force_reload=True, # ) diff --git a/tractor/trionics/__init__.py b/tractor/trionics/__init__.py index df9b6f26..42f675b2 100644 --- a/tractor/trionics/__init__.py +++ b/tractor/trionics/__init__.py @@ -31,4 +31,5 @@ from ._broadcast import ( ) from ._beg import ( collapse_eg as collapse_eg, + maybe_collapse_eg as maybe_collapse_eg, ) diff --git a/tractor/trionics/_mngrs.py b/tractor/trionics/_mngrs.py index 9a5ed156..24b4fde8 100644 --- a/tractor/trionics/_mngrs.py +++ b/tractor/trionics/_mngrs.py @@ -70,7 +70,8 @@ async def maybe_open_nursery( yield nursery else: async with lib.open_nursery(**kwargs) as nursery: - nursery.cancel_scope.shield = shield + if lib == trio: + nursery.cancel_scope.shield = shield yield nursery diff --git a/uv.lock b/uv.lock index e1c409f5..3c05dc2f 100644 --- a/uv.lock +++ b/uv.lock @@ -1,14 +1,23 @@ version = 1 -revision = 1 +revision = 2 requires-python = ">=3.11" [[package]] name = "attrs" version = "24.3.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/48/c8/6260f8ccc11f0917360fc0da435c5c9c7504e3db174d5a12a1494887b045/attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff", size = 805984 } +sdist = { url = "https://files.pythonhosted.org/packages/48/c8/6260f8ccc11f0917360fc0da435c5c9c7504e3db174d5a12a1494887b045/attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff", size = 805984, upload-time = "2024-12-16T06:59:29.899Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/89/aa/ab0f7891a01eeb2d2e338ae8fecbe57fcebea1a24dbb64d45801bfab481d/attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308", size = 63397 }, + { url = "https://files.pythonhosted.org/packages/89/aa/ab0f7891a01eeb2d2e338ae8fecbe57fcebea1a24dbb64d45801bfab481d/attrs-24.3.0-py3-none-any.whl", 
hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308", size = 63397, upload-time = "2024-12-16T06:59:26.977Z" }, +] + +[[package]] +name = "bidict" +version = "0.23.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9a/6e/026678aa5a830e07cd9498a05d3e7e650a4f56a42f267a53d22bcda1bdc9/bidict-0.23.1.tar.gz", hash = "sha256:03069d763bc387bbd20e7d49914e75fc4132a41937fa3405417e1a5a2d006d71", size = 29093, upload-time = "2024-02-18T19:09:05.748Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/37/e8730c3587a65eb5645d4aba2d27aae48e8003614d6aaf15dda67f702f1f/bidict-0.23.1-py3-none-any.whl", hash = "sha256:5dae8d4d79b552a71cbabc7deb25dfe8ce710b17ff41711e13010ead2abfc3e5", size = 32764, upload-time = "2024-02-18T19:09:04.156Z" }, ] [[package]] @@ -18,23 +27,51 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pycparser" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621 } +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727 }, - { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400 }, - { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, - { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, - { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, - { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, + { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264, upload-time = "2024-09-04T20:43:51.124Z" }, + { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651, upload-time = "2024-09-04T20:43:52.872Z" }, + { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259, upload-time = "2024-09-04T20:43:56.123Z" }, + { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200, upload-time = "2024-09-04T20:43:57.891Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235, upload-time = "2024-09-04T20:44:00.18Z" }, + { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721, upload-time = "2024-09-04T20:44:01.585Z" }, + { url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242, upload-time = "2024-09-04T20:44:03.467Z" }, + { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999, upload-time = "2024-09-04T20:44:05.023Z" }, + { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242, upload-time = "2024-09-04T20:44:06.444Z" }, + { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604, upload-time = "2024-09-04T20:44:08.206Z" }, + { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727, upload-time = "2024-09-04T20:44:09.481Z" }, + { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400, upload-time = "2024-09-04T20:44:10.873Z" }, + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" }, + { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, upload-time = "2024-09-04T20:44:23.245Z" }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989, upload-time = "2024-09-04T20:44:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802, upload-time = "2024-09-04T20:44:30.289Z" }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792, upload-time = "2024-09-04T20:44:32.01Z" }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893, upload-time = "2024-09-04T20:44:33.606Z" }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810, upload-time = "2024-09-04T20:44:35.191Z" }, + { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200, upload-time = "2024-09-04T20:44:36.743Z" }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447, upload-time = "2024-09-04T20:44:38.492Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358, upload-time = "2024-09-04T20:44:40.046Z" }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469, upload-time = "2024-09-04T20:44:41.616Z" }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" }, ] [[package]] name = "colorama" version = "0.4.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] [[package]] @@ -44,9 +81,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d3/7a/359f4d5df2353f26172b3cc39ea32daa39af8de522205f512f458923e677/colorlog-6.9.0.tar.gz", hash = "sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2", size = 16624 } +sdist = { url = "https://files.pythonhosted.org/packages/d3/7a/359f4d5df2353f26172b3cc39ea32daa39af8de522205f512f458923e677/colorlog-6.9.0.tar.gz", hash = "sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2", size = 16624, upload-time = "2024-10-29T18:34:51.011Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e3/51/9b208e85196941db2f0654ad0357ca6388ab3ed67efdbfc799f35d1f83aa/colorlog-6.9.0-py3-none-any.whl", hash = "sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff", size = 11424 }, + { url = "https://files.pythonhosted.org/packages/e3/51/9b208e85196941db2f0654ad0357ca6388ab3ed67efdbfc799f35d1f83aa/colorlog-6.9.0-py3-none-any.whl", hash = "sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff", size = 11424, upload-time = "2024-10-29T18:34:49.815Z" }, ] [[package]] @@ -58,98 +95,98 @@ dependencies = [ { name = "outcome" }, { name = "sniffio" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/dc/c1/ab3a42c0f3ed56df9cd33de1539b3198d98c6ccbaf88a73d6be0b72d85e0/greenback-1.2.1.tar.gz", hash = "sha256:de3ca656885c03b96dab36079f3de74bb5ba061da9bfe3bb69dccc866ef95ea3", size = 42597 } +sdist = { url = "https://files.pythonhosted.org/packages/dc/c1/ab3a42c0f3ed56df9cd33de1539b3198d98c6ccbaf88a73d6be0b72d85e0/greenback-1.2.1.tar.gz", hash = "sha256:de3ca656885c03b96dab36079f3de74bb5ba061da9bfe3bb69dccc866ef95ea3", size = 42597, upload-time = "2024-02-20T21:23:13.239Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/71/d0/b8dc79d5ecfffacad9c844b6ae76b9c6259935796d3c561deccbf8fa421d/greenback-1.2.1-py3-none-any.whl", hash = "sha256:98768edbbe4340091a9730cf64a683fcbaa3f2cb81e4ac41d7ed28d3b6f74b79", size = 28062 }, + { url = "https://files.pythonhosted.org/packages/71/d0/b8dc79d5ecfffacad9c844b6ae76b9c6259935796d3c561deccbf8fa421d/greenback-1.2.1-py3-none-any.whl", hash = "sha256:98768edbbe4340091a9730cf64a683fcbaa3f2cb81e4ac41d7ed28d3b6f74b79", size = 28062, upload-time = "2024-02-20T21:23:12.031Z" }, ] [[package]] name = "greenlet" version = 
"3.1.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2f/ff/df5fede753cc10f6a5be0931204ea30c35fa2f2ea7a35b25bdaf4fe40e46/greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467", size = 186022 } +sdist = { url = "https://files.pythonhosted.org/packages/2f/ff/df5fede753cc10f6a5be0931204ea30c35fa2f2ea7a35b25bdaf4fe40e46/greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467", size = 186022, upload-time = "2024-09-20T18:21:04.506Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/28/62/1c2665558618553c42922ed47a4e6d6527e2fa3516a8256c2f431c5d0441/greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70", size = 272479 }, - { url = "https://files.pythonhosted.org/packages/76/9d/421e2d5f07285b6e4e3a676b016ca781f63cfe4a0cd8eaecf3fd6f7a71ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159", size = 640404 }, - { url = "https://files.pythonhosted.org/packages/e5/de/6e05f5c59262a584e502dd3d261bbdd2c97ab5416cc9c0b91ea38932a901/greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e", size = 652813 }, - { url = "https://files.pythonhosted.org/packages/49/93/d5f93c84241acdea15a8fd329362c2c71c79e1a507c3f142a5d67ea435ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1", size = 648517 }, - { url = "https://files.pythonhosted.org/packages/15/85/72f77fc02d00470c86a5c982b8daafdf65d38aefbbe441cebff3bf7037fc/greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383", size = 647831 }, - { url = "https://files.pythonhosted.org/packages/f7/4b/1c9695aa24f808e156c8f4813f685d975ca73c000c2a5056c514c64980f6/greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a", size = 602413 }, - { url = "https://files.pythonhosted.org/packages/76/70/ad6e5b31ef330f03b12559d19fda2606a522d3849cde46b24f223d6d1619/greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511", size = 1129619 }, - { url = "https://files.pythonhosted.org/packages/f4/fb/201e1b932e584066e0f0658b538e73c459b34d44b4bd4034f682423bc801/greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395", size = 1155198 }, - { url = "https://files.pythonhosted.org/packages/12/da/b9ed5e310bb8b89661b80cbcd4db5a067903bbcd7fc854923f5ebb4144f0/greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39", size = 298930 }, - { url = "https://files.pythonhosted.org/packages/7d/ec/bad1ac26764d26aa1353216fcbfa4670050f66d445448aafa227f8b16e80/greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d", size = 274260 }, - { url = 
"https://files.pythonhosted.org/packages/66/d4/c8c04958870f482459ab5956c2942c4ec35cac7fe245527f1039837c17a9/greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79", size = 649064 }, - { url = "https://files.pythonhosted.org/packages/51/41/467b12a8c7c1303d20abcca145db2be4e6cd50a951fa30af48b6ec607581/greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa", size = 663420 }, - { url = "https://files.pythonhosted.org/packages/27/8f/2a93cd9b1e7107d5c7b3b7816eeadcac2ebcaf6d6513df9abaf0334777f6/greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441", size = 658035 }, - { url = "https://files.pythonhosted.org/packages/57/5c/7c6f50cb12be092e1dccb2599be5a942c3416dbcfb76efcf54b3f8be4d8d/greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36", size = 660105 }, - { url = "https://files.pythonhosted.org/packages/f1/66/033e58a50fd9ec9df00a8671c74f1f3a320564c6415a4ed82a1c651654ba/greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9", size = 613077 }, - { url = "https://files.pythonhosted.org/packages/19/c5/36384a06f748044d06bdd8776e231fadf92fc896bd12cb1c9f5a1bda9578/greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0", size = 1135975 }, - { url = "https://files.pythonhosted.org/packages/38/f9/c0a0eb61bdf808d23266ecf1d63309f0e1471f284300ce6dac0ae1231881/greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942", size = 1163955 }, - { url = "https://files.pythonhosted.org/packages/43/21/a5d9df1d21514883333fc86584c07c2b49ba7c602e670b174bd73cfc9c7f/greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01", size = 299655 }, - { url = "https://files.pythonhosted.org/packages/f3/57/0db4940cd7bb461365ca8d6fd53e68254c9dbbcc2b452e69d0d41f10a85e/greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1", size = 272990 }, - { url = "https://files.pythonhosted.org/packages/1c/ec/423d113c9f74e5e402e175b157203e9102feeb7088cee844d735b28ef963/greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff", size = 649175 }, - { url = "https://files.pythonhosted.org/packages/a9/46/ddbd2db9ff209186b7b7c621d1432e2f21714adc988703dbdd0e65155c77/greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a", size = 663425 }, - { url = "https://files.pythonhosted.org/packages/bc/f9/9c82d6b2b04aa37e38e74f0c429aece5eeb02bab6e3b98e7db89b23d94c6/greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e", size = 657736 }, - { url = 
"https://files.pythonhosted.org/packages/d9/42/b87bc2a81e3a62c3de2b0d550bf91a86939442b7ff85abb94eec3fc0e6aa/greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4", size = 660347 }, - { url = "https://files.pythonhosted.org/packages/37/fa/71599c3fd06336cdc3eac52e6871cfebab4d9d70674a9a9e7a482c318e99/greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e", size = 615583 }, - { url = "https://files.pythonhosted.org/packages/4e/96/e9ef85de031703ee7a4483489b40cf307f93c1824a02e903106f2ea315fe/greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1", size = 1133039 }, - { url = "https://files.pythonhosted.org/packages/87/76/b2b6362accd69f2d1889db61a18c94bc743e961e3cab344c2effaa4b4a25/greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c", size = 1160716 }, - { url = "https://files.pythonhosted.org/packages/1f/1b/54336d876186920e185066d8c3024ad55f21d7cc3683c856127ddb7b13ce/greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761", size = 299490 }, - { url = "https://files.pythonhosted.org/packages/5f/17/bea55bf36990e1638a2af5ba10c1640273ef20f627962cf97107f1e5d637/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011", size = 643731 }, - { url = "https://files.pythonhosted.org/packages/78/d2/aa3d2157f9ab742a08e0fd8f77d4699f37c22adfbfeb0c610a186b5f75e0/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13", size = 649304 }, - { url = "https://files.pythonhosted.org/packages/f1/8e/d0aeffe69e53ccff5a28fa86f07ad1d2d2d6537a9506229431a2a02e2f15/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475", size = 646537 }, - { url = "https://files.pythonhosted.org/packages/05/79/e15408220bbb989469c8871062c97c6c9136770657ba779711b90870d867/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b", size = 642506 }, - { url = "https://files.pythonhosted.org/packages/18/87/470e01a940307796f1d25f8167b551a968540fbe0551c0ebb853cb527dd6/greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822", size = 602753 }, - { url = "https://files.pythonhosted.org/packages/e2/72/576815ba674eddc3c25028238f74d7b8068902b3968cbe456771b166455e/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01", size = 1122731 }, - { url = "https://files.pythonhosted.org/packages/ac/38/08cc303ddddc4b3d7c628c3039a61a3aae36c241ed01393d00c2fd663473/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6", size = 1142112 }, + { url = 
"https://files.pythonhosted.org/packages/28/62/1c2665558618553c42922ed47a4e6d6527e2fa3516a8256c2f431c5d0441/greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70", size = 272479, upload-time = "2024-09-20T17:07:22.332Z" }, + { url = "https://files.pythonhosted.org/packages/76/9d/421e2d5f07285b6e4e3a676b016ca781f63cfe4a0cd8eaecf3fd6f7a71ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159", size = 640404, upload-time = "2024-09-20T17:36:45.588Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/6e05f5c59262a584e502dd3d261bbdd2c97ab5416cc9c0b91ea38932a901/greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e", size = 652813, upload-time = "2024-09-20T17:39:19.052Z" }, + { url = "https://files.pythonhosted.org/packages/49/93/d5f93c84241acdea15a8fd329362c2c71c79e1a507c3f142a5d67ea435ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1", size = 648517, upload-time = "2024-09-20T17:44:24.101Z" }, + { url = "https://files.pythonhosted.org/packages/15/85/72f77fc02d00470c86a5c982b8daafdf65d38aefbbe441cebff3bf7037fc/greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383", size = 647831, upload-time = "2024-09-20T17:08:40.577Z" }, + { url = "https://files.pythonhosted.org/packages/f7/4b/1c9695aa24f808e156c8f4813f685d975ca73c000c2a5056c514c64980f6/greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a", size = 602413, upload-time = "2024-09-20T17:08:31.728Z" }, + { url = "https://files.pythonhosted.org/packages/76/70/ad6e5b31ef330f03b12559d19fda2606a522d3849cde46b24f223d6d1619/greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511", size = 1129619, upload-time = "2024-09-20T17:44:14.222Z" }, + { url = "https://files.pythonhosted.org/packages/f4/fb/201e1b932e584066e0f0658b538e73c459b34d44b4bd4034f682423bc801/greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395", size = 1155198, upload-time = "2024-09-20T17:09:23.903Z" }, + { url = "https://files.pythonhosted.org/packages/12/da/b9ed5e310bb8b89661b80cbcd4db5a067903bbcd7fc854923f5ebb4144f0/greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39", size = 298930, upload-time = "2024-09-20T17:25:18.656Z" }, + { url = "https://files.pythonhosted.org/packages/7d/ec/bad1ac26764d26aa1353216fcbfa4670050f66d445448aafa227f8b16e80/greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d", size = 274260, upload-time = "2024-09-20T17:08:07.301Z" }, + { url = "https://files.pythonhosted.org/packages/66/d4/c8c04958870f482459ab5956c2942c4ec35cac7fe245527f1039837c17a9/greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79", size = 649064, upload-time = 
"2024-09-20T17:36:47.628Z" }, + { url = "https://files.pythonhosted.org/packages/51/41/467b12a8c7c1303d20abcca145db2be4e6cd50a951fa30af48b6ec607581/greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa", size = 663420, upload-time = "2024-09-20T17:39:21.258Z" }, + { url = "https://files.pythonhosted.org/packages/27/8f/2a93cd9b1e7107d5c7b3b7816eeadcac2ebcaf6d6513df9abaf0334777f6/greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441", size = 658035, upload-time = "2024-09-20T17:44:26.501Z" }, + { url = "https://files.pythonhosted.org/packages/57/5c/7c6f50cb12be092e1dccb2599be5a942c3416dbcfb76efcf54b3f8be4d8d/greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36", size = 660105, upload-time = "2024-09-20T17:08:42.048Z" }, + { url = "https://files.pythonhosted.org/packages/f1/66/033e58a50fd9ec9df00a8671c74f1f3a320564c6415a4ed82a1c651654ba/greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9", size = 613077, upload-time = "2024-09-20T17:08:33.707Z" }, + { url = "https://files.pythonhosted.org/packages/19/c5/36384a06f748044d06bdd8776e231fadf92fc896bd12cb1c9f5a1bda9578/greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0", size = 1135975, upload-time = "2024-09-20T17:44:15.989Z" }, + { url = "https://files.pythonhosted.org/packages/38/f9/c0a0eb61bdf808d23266ecf1d63309f0e1471f284300ce6dac0ae1231881/greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942", size = 1163955, upload-time = "2024-09-20T17:09:25.539Z" }, + { url = "https://files.pythonhosted.org/packages/43/21/a5d9df1d21514883333fc86584c07c2b49ba7c602e670b174bd73cfc9c7f/greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01", size = 299655, upload-time = "2024-09-20T17:21:22.427Z" }, + { url = "https://files.pythonhosted.org/packages/f3/57/0db4940cd7bb461365ca8d6fd53e68254c9dbbcc2b452e69d0d41f10a85e/greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1", size = 272990, upload-time = "2024-09-20T17:08:26.312Z" }, + { url = "https://files.pythonhosted.org/packages/1c/ec/423d113c9f74e5e402e175b157203e9102feeb7088cee844d735b28ef963/greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff", size = 649175, upload-time = "2024-09-20T17:36:48.983Z" }, + { url = "https://files.pythonhosted.org/packages/a9/46/ddbd2db9ff209186b7b7c621d1432e2f21714adc988703dbdd0e65155c77/greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a", size = 663425, upload-time = "2024-09-20T17:39:22.705Z" }, + { url = "https://files.pythonhosted.org/packages/bc/f9/9c82d6b2b04aa37e38e74f0c429aece5eeb02bab6e3b98e7db89b23d94c6/greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e", size = 657736, upload-time = "2024-09-20T17:44:28.544Z" }, + { url = "https://files.pythonhosted.org/packages/d9/42/b87bc2a81e3a62c3de2b0d550bf91a86939442b7ff85abb94eec3fc0e6aa/greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4", size = 660347, upload-time = "2024-09-20T17:08:45.56Z" }, + { url = "https://files.pythonhosted.org/packages/37/fa/71599c3fd06336cdc3eac52e6871cfebab4d9d70674a9a9e7a482c318e99/greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e", size = 615583, upload-time = "2024-09-20T17:08:36.85Z" }, + { url = "https://files.pythonhosted.org/packages/4e/96/e9ef85de031703ee7a4483489b40cf307f93c1824a02e903106f2ea315fe/greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1", size = 1133039, upload-time = "2024-09-20T17:44:18.287Z" }, + { url = "https://files.pythonhosted.org/packages/87/76/b2b6362accd69f2d1889db61a18c94bc743e961e3cab344c2effaa4b4a25/greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c", size = 1160716, upload-time = "2024-09-20T17:09:27.112Z" }, + { url = "https://files.pythonhosted.org/packages/1f/1b/54336d876186920e185066d8c3024ad55f21d7cc3683c856127ddb7b13ce/greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761", size = 299490, upload-time = "2024-09-20T17:17:09.501Z" }, + { url = "https://files.pythonhosted.org/packages/5f/17/bea55bf36990e1638a2af5ba10c1640273ef20f627962cf97107f1e5d637/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011", size = 643731, upload-time = "2024-09-20T17:36:50.376Z" }, + { url = "https://files.pythonhosted.org/packages/78/d2/aa3d2157f9ab742a08e0fd8f77d4699f37c22adfbfeb0c610a186b5f75e0/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13", size = 649304, upload-time = "2024-09-20T17:39:24.55Z" }, + { url = "https://files.pythonhosted.org/packages/f1/8e/d0aeffe69e53ccff5a28fa86f07ad1d2d2d6537a9506229431a2a02e2f15/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475", size = 646537, upload-time = "2024-09-20T17:44:31.102Z" }, + { url = "https://files.pythonhosted.org/packages/05/79/e15408220bbb989469c8871062c97c6c9136770657ba779711b90870d867/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b", size = 642506, upload-time = "2024-09-20T17:08:47.852Z" }, + { url = "https://files.pythonhosted.org/packages/18/87/470e01a940307796f1d25f8167b551a968540fbe0551c0ebb853cb527dd6/greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822", size = 602753, upload-time = "2024-09-20T17:08:38.079Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/72/576815ba674eddc3c25028238f74d7b8068902b3968cbe456771b166455e/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01", size = 1122731, upload-time = "2024-09-20T17:44:20.556Z" }, + { url = "https://files.pythonhosted.org/packages/ac/38/08cc303ddddc4b3d7c628c3039a61a3aae36c241ed01393d00c2fd663473/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6", size = 1142112, upload-time = "2024-09-20T17:09:28.753Z" }, ] [[package]] name = "idna" version = "3.10" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, ] [[package]] name = "iniconfig" version = "2.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646, upload-time = "2023-01-07T11:08:11.254Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892, upload-time = "2023-01-07T11:08:09.864Z" }, ] [[package]] name = "msgspec" version = "0.19.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cf/9b/95d8ce458462b8b71b8a70fa94563b2498b89933689f3a7b8911edfae3d7/msgspec-0.19.0.tar.gz", hash = "sha256:604037e7cd475345848116e89c553aa9a233259733ab51986ac924ab1b976f8e", size = 216934 } +sdist = { url = "https://files.pythonhosted.org/packages/cf/9b/95d8ce458462b8b71b8a70fa94563b2498b89933689f3a7b8911edfae3d7/msgspec-0.19.0.tar.gz", hash = "sha256:604037e7cd475345848116e89c553aa9a233259733ab51986ac924ab1b976f8e", 
size = 216934, upload-time = "2024-12-27T17:40:28.597Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/24/d4/2ec2567ac30dab072cce3e91fb17803c52f0a37aab6b0c24375d2b20a581/msgspec-0.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aa77046904db764b0462036bc63ef71f02b75b8f72e9c9dd4c447d6da1ed8f8e", size = 187939 }, - { url = "https://files.pythonhosted.org/packages/2b/c0/18226e4328897f4f19875cb62bb9259fe47e901eade9d9376ab5f251a929/msgspec-0.19.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:047cfa8675eb3bad68722cfe95c60e7afabf84d1bd8938979dd2b92e9e4a9551", size = 182202 }, - { url = "https://files.pythonhosted.org/packages/81/25/3a4b24d468203d8af90d1d351b77ea3cffb96b29492855cf83078f16bfe4/msgspec-0.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e78f46ff39a427e10b4a61614a2777ad69559cc8d603a7c05681f5a595ea98f7", size = 209029 }, - { url = "https://files.pythonhosted.org/packages/85/2e/db7e189b57901955239f7689b5dcd6ae9458637a9c66747326726c650523/msgspec-0.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c7adf191e4bd3be0e9231c3b6dc20cf1199ada2af523885efc2ed218eafd011", size = 210682 }, - { url = "https://files.pythonhosted.org/packages/03/97/7c8895c9074a97052d7e4a1cc1230b7b6e2ca2486714eb12c3f08bb9d284/msgspec-0.19.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f04cad4385e20be7c7176bb8ae3dca54a08e9756cfc97bcdb4f18560c3042063", size = 214003 }, - { url = "https://files.pythonhosted.org/packages/61/61/e892997bcaa289559b4d5869f066a8021b79f4bf8e955f831b095f47a4cd/msgspec-0.19.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:45c8fb410670b3b7eb884d44a75589377c341ec1392b778311acdbfa55187716", size = 216833 }, - { url = "https://files.pythonhosted.org/packages/ce/3d/71b2dffd3a1c743ffe13296ff701ee503feaebc3f04d0e75613b6563c374/msgspec-0.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:70eaef4934b87193a27d802534dc466778ad8d536e296ae2f9334e182ac27b6c", size = 186184 }, - { url = "https://files.pythonhosted.org/packages/b2/5f/a70c24f075e3e7af2fae5414c7048b0e11389685b7f717bb55ba282a34a7/msgspec-0.19.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f98bd8962ad549c27d63845b50af3f53ec468b6318400c9f1adfe8b092d7b62f", size = 190485 }, - { url = "https://files.pythonhosted.org/packages/89/b0/1b9763938cfae12acf14b682fcf05c92855974d921a5a985ecc197d1c672/msgspec-0.19.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:43bbb237feab761b815ed9df43b266114203f53596f9b6e6f00ebd79d178cdf2", size = 183910 }, - { url = "https://files.pythonhosted.org/packages/87/81/0c8c93f0b92c97e326b279795f9c5b956c5a97af28ca0fbb9fd86c83737a/msgspec-0.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cfc033c02c3e0aec52b71710d7f84cb3ca5eb407ab2ad23d75631153fdb1f12", size = 210633 }, - { url = "https://files.pythonhosted.org/packages/d0/ef/c5422ce8af73928d194a6606f8ae36e93a52fd5e8df5abd366903a5ca8da/msgspec-0.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d911c442571605e17658ca2b416fd8579c5050ac9adc5e00c2cb3126c97f73bc", size = 213594 }, - { url = "https://files.pythonhosted.org/packages/19/2b/4137bc2ed45660444842d042be2cf5b18aa06efd2cda107cff18253b9653/msgspec-0.19.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:757b501fa57e24896cf40a831442b19a864f56d253679f34f260dcb002524a6c", size = 214053 }, - { url = 
"https://files.pythonhosted.org/packages/9d/e6/8ad51bdc806aac1dc501e8fe43f759f9ed7284043d722b53323ea421c360/msgspec-0.19.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5f0f65f29b45e2816d8bded36e6b837a4bf5fb60ec4bc3c625fa2c6da4124537", size = 219081 }, - { url = "https://files.pythonhosted.org/packages/b1/ef/27dd35a7049c9a4f4211c6cd6a8c9db0a50647546f003a5867827ec45391/msgspec-0.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:067f0de1c33cfa0b6a8206562efdf6be5985b988b53dd244a8e06f993f27c8c0", size = 187467 }, - { url = "https://files.pythonhosted.org/packages/3c/cb/2842c312bbe618d8fefc8b9cedce37f773cdc8fa453306546dba2c21fd98/msgspec-0.19.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f12d30dd6266557aaaf0aa0f9580a9a8fbeadfa83699c487713e355ec5f0bd86", size = 190498 }, - { url = "https://files.pythonhosted.org/packages/58/95/c40b01b93465e1a5f3b6c7d91b10fb574818163740cc3acbe722d1e0e7e4/msgspec-0.19.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82b2c42c1b9ebc89e822e7e13bbe9d17ede0c23c187469fdd9505afd5a481314", size = 183950 }, - { url = "https://files.pythonhosted.org/packages/e8/f0/5b764e066ce9aba4b70d1db8b087ea66098c7c27d59b9dd8a3532774d48f/msgspec-0.19.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19746b50be214a54239aab822964f2ac81e38b0055cca94808359d779338c10e", size = 210647 }, - { url = "https://files.pythonhosted.org/packages/9d/87/bc14f49bc95c4cb0dd0a8c56028a67c014ee7e6818ccdce74a4862af259b/msgspec-0.19.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60ef4bdb0ec8e4ad62e5a1f95230c08efb1f64f32e6e8dd2ced685bcc73858b5", size = 213563 }, - { url = "https://files.pythonhosted.org/packages/53/2f/2b1c2b056894fbaa975f68f81e3014bb447516a8b010f1bed3fb0e016ed7/msgspec-0.19.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac7f7c377c122b649f7545810c6cd1b47586e3aa3059126ce3516ac7ccc6a6a9", size = 213996 }, - { url = "https://files.pythonhosted.org/packages/aa/5a/4cd408d90d1417e8d2ce6a22b98a6853c1b4d7cb7669153e4424d60087f6/msgspec-0.19.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5bc1472223a643f5ffb5bf46ccdede7f9795078194f14edd69e3aab7020d327", size = 219087 }, - { url = "https://files.pythonhosted.org/packages/23/d8/f15b40611c2d5753d1abb0ca0da0c75348daf1252220e5dda2867bd81062/msgspec-0.19.0-cp313-cp313-win_amd64.whl", hash = "sha256:317050bc0f7739cb30d257ff09152ca309bf5a369854bbf1e57dffc310c1f20f", size = 187432 }, + { url = "https://files.pythonhosted.org/packages/24/d4/2ec2567ac30dab072cce3e91fb17803c52f0a37aab6b0c24375d2b20a581/msgspec-0.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aa77046904db764b0462036bc63ef71f02b75b8f72e9c9dd4c447d6da1ed8f8e", size = 187939, upload-time = "2024-12-27T17:39:32.347Z" }, + { url = "https://files.pythonhosted.org/packages/2b/c0/18226e4328897f4f19875cb62bb9259fe47e901eade9d9376ab5f251a929/msgspec-0.19.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:047cfa8675eb3bad68722cfe95c60e7afabf84d1bd8938979dd2b92e9e4a9551", size = 182202, upload-time = "2024-12-27T17:39:33.633Z" }, + { url = "https://files.pythonhosted.org/packages/81/25/3a4b24d468203d8af90d1d351b77ea3cffb96b29492855cf83078f16bfe4/msgspec-0.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e78f46ff39a427e10b4a61614a2777ad69559cc8d603a7c05681f5a595ea98f7", size = 209029, upload-time = "2024-12-27T17:39:35.023Z" }, + { url = 
"https://files.pythonhosted.org/packages/85/2e/db7e189b57901955239f7689b5dcd6ae9458637a9c66747326726c650523/msgspec-0.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c7adf191e4bd3be0e9231c3b6dc20cf1199ada2af523885efc2ed218eafd011", size = 210682, upload-time = "2024-12-27T17:39:36.384Z" }, + { url = "https://files.pythonhosted.org/packages/03/97/7c8895c9074a97052d7e4a1cc1230b7b6e2ca2486714eb12c3f08bb9d284/msgspec-0.19.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f04cad4385e20be7c7176bb8ae3dca54a08e9756cfc97bcdb4f18560c3042063", size = 214003, upload-time = "2024-12-27T17:39:39.097Z" }, + { url = "https://files.pythonhosted.org/packages/61/61/e892997bcaa289559b4d5869f066a8021b79f4bf8e955f831b095f47a4cd/msgspec-0.19.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:45c8fb410670b3b7eb884d44a75589377c341ec1392b778311acdbfa55187716", size = 216833, upload-time = "2024-12-27T17:39:41.203Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3d/71b2dffd3a1c743ffe13296ff701ee503feaebc3f04d0e75613b6563c374/msgspec-0.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:70eaef4934b87193a27d802534dc466778ad8d536e296ae2f9334e182ac27b6c", size = 186184, upload-time = "2024-12-27T17:39:43.702Z" }, + { url = "https://files.pythonhosted.org/packages/b2/5f/a70c24f075e3e7af2fae5414c7048b0e11389685b7f717bb55ba282a34a7/msgspec-0.19.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f98bd8962ad549c27d63845b50af3f53ec468b6318400c9f1adfe8b092d7b62f", size = 190485, upload-time = "2024-12-27T17:39:44.974Z" }, + { url = "https://files.pythonhosted.org/packages/89/b0/1b9763938cfae12acf14b682fcf05c92855974d921a5a985ecc197d1c672/msgspec-0.19.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:43bbb237feab761b815ed9df43b266114203f53596f9b6e6f00ebd79d178cdf2", size = 183910, upload-time = "2024-12-27T17:39:46.401Z" }, + { url = "https://files.pythonhosted.org/packages/87/81/0c8c93f0b92c97e326b279795f9c5b956c5a97af28ca0fbb9fd86c83737a/msgspec-0.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cfc033c02c3e0aec52b71710d7f84cb3ca5eb407ab2ad23d75631153fdb1f12", size = 210633, upload-time = "2024-12-27T17:39:49.099Z" }, + { url = "https://files.pythonhosted.org/packages/d0/ef/c5422ce8af73928d194a6606f8ae36e93a52fd5e8df5abd366903a5ca8da/msgspec-0.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d911c442571605e17658ca2b416fd8579c5050ac9adc5e00c2cb3126c97f73bc", size = 213594, upload-time = "2024-12-27T17:39:51.204Z" }, + { url = "https://files.pythonhosted.org/packages/19/2b/4137bc2ed45660444842d042be2cf5b18aa06efd2cda107cff18253b9653/msgspec-0.19.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:757b501fa57e24896cf40a831442b19a864f56d253679f34f260dcb002524a6c", size = 214053, upload-time = "2024-12-27T17:39:52.866Z" }, + { url = "https://files.pythonhosted.org/packages/9d/e6/8ad51bdc806aac1dc501e8fe43f759f9ed7284043d722b53323ea421c360/msgspec-0.19.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5f0f65f29b45e2816d8bded36e6b837a4bf5fb60ec4bc3c625fa2c6da4124537", size = 219081, upload-time = "2024-12-27T17:39:55.142Z" }, + { url = "https://files.pythonhosted.org/packages/b1/ef/27dd35a7049c9a4f4211c6cd6a8c9db0a50647546f003a5867827ec45391/msgspec-0.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:067f0de1c33cfa0b6a8206562efdf6be5985b988b53dd244a8e06f993f27c8c0", size = 187467, upload-time = "2024-12-27T17:39:56.531Z" }, + { url = 
"https://files.pythonhosted.org/packages/3c/cb/2842c312bbe618d8fefc8b9cedce37f773cdc8fa453306546dba2c21fd98/msgspec-0.19.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f12d30dd6266557aaaf0aa0f9580a9a8fbeadfa83699c487713e355ec5f0bd86", size = 190498, upload-time = "2024-12-27T17:40:00.427Z" }, + { url = "https://files.pythonhosted.org/packages/58/95/c40b01b93465e1a5f3b6c7d91b10fb574818163740cc3acbe722d1e0e7e4/msgspec-0.19.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82b2c42c1b9ebc89e822e7e13bbe9d17ede0c23c187469fdd9505afd5a481314", size = 183950, upload-time = "2024-12-27T17:40:04.219Z" }, + { url = "https://files.pythonhosted.org/packages/e8/f0/5b764e066ce9aba4b70d1db8b087ea66098c7c27d59b9dd8a3532774d48f/msgspec-0.19.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19746b50be214a54239aab822964f2ac81e38b0055cca94808359d779338c10e", size = 210647, upload-time = "2024-12-27T17:40:05.606Z" }, + { url = "https://files.pythonhosted.org/packages/9d/87/bc14f49bc95c4cb0dd0a8c56028a67c014ee7e6818ccdce74a4862af259b/msgspec-0.19.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60ef4bdb0ec8e4ad62e5a1f95230c08efb1f64f32e6e8dd2ced685bcc73858b5", size = 213563, upload-time = "2024-12-27T17:40:10.516Z" }, + { url = "https://files.pythonhosted.org/packages/53/2f/2b1c2b056894fbaa975f68f81e3014bb447516a8b010f1bed3fb0e016ed7/msgspec-0.19.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac7f7c377c122b649f7545810c6cd1b47586e3aa3059126ce3516ac7ccc6a6a9", size = 213996, upload-time = "2024-12-27T17:40:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/aa/5a/4cd408d90d1417e8d2ce6a22b98a6853c1b4d7cb7669153e4424d60087f6/msgspec-0.19.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5bc1472223a643f5ffb5bf46ccdede7f9795078194f14edd69e3aab7020d327", size = 219087, upload-time = "2024-12-27T17:40:14.881Z" }, + { url = "https://files.pythonhosted.org/packages/23/d8/f15b40611c2d5753d1abb0ca0da0c75348daf1252220e5dda2867bd81062/msgspec-0.19.0-cp313-cp313-win_amd64.whl", hash = "sha256:317050bc0f7739cb30d257ff09152ca309bf5a369854bbf1e57dffc310c1f20f", size = 187432, upload-time = "2024-12-27T17:40:16.256Z" }, ] [[package]] @@ -159,18 +196,18 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/98/df/77698abfac98571e65ffeb0c1fba8ffd692ab8458d617a0eed7d9a8d38f2/outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8", size = 21060 } +sdist = { url = "https://files.pythonhosted.org/packages/98/df/77698abfac98571e65ffeb0c1fba8ffd692ab8458d617a0eed7d9a8d38f2/outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8", size = 21060, upload-time = "2023-10-26T04:26:04.361Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/8b/5ab7257531a5d830fc8000c476e63c935488d74609b50f9384a643ec0a62/outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b", size = 10692 }, + { url = "https://files.pythonhosted.org/packages/55/8b/5ab7257531a5d830fc8000c476e63c935488d74609b50f9384a643ec0a62/outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b", size = 10692, upload-time = "2023-10-26T04:26:02.532Z" }, ] [[package]] name = "packaging" version = "24.2" source = { registry = "https://pypi.org/simple" } 
-sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950, upload-time = "2024-11-08T09:47:47.202Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451, upload-time = "2024-11-08T09:47:44.722Z" }, ] [[package]] @@ -182,9 +219,9 @@ dependencies = [ { name = "pygments" }, { name = "tabcompleter" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/69/13/80da03638f62facbee76312ca9ee5941c017b080f2e4c6919fd4e87e16e3/pdbp-1.6.1.tar.gz", hash = "sha256:f4041642952a05df89664e166d5bd379607a0866ddd753c06874f65552bdf40b", size = 25322 } +sdist = { url = "https://files.pythonhosted.org/packages/69/13/80da03638f62facbee76312ca9ee5941c017b080f2e4c6919fd4e87e16e3/pdbp-1.6.1.tar.gz", hash = "sha256:f4041642952a05df89664e166d5bd379607a0866ddd753c06874f65552bdf40b", size = 25322, upload-time = "2024-11-07T15:36:43.062Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/29/93/d56fb9ba5569dc29d8263c72e46d21a2fd38741339ebf03f54cf7561828c/pdbp-1.6.1-py3-none-any.whl", hash = "sha256:f10bad2ee044c0e5c168cb0825abfdbdc01c50013e9755df5261b060bdd35c22", size = 21495 }, + { url = "https://files.pythonhosted.org/packages/29/93/d56fb9ba5569dc29d8263c72e46d21a2fd38741339ebf03f54cf7561828c/pdbp-1.6.1-py3-none-any.whl", hash = "sha256:f10bad2ee044c0e5c168cb0825abfdbdc01c50013e9755df5261b060bdd35c22", size = 21495, upload-time = "2024-11-07T15:36:41.061Z" }, ] [[package]] @@ -194,18 +231,18 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "ptyprocess" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450 } +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" }, ] [[package]] name = "pluggy" version = "1.5.0" source = { registry = 
"https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955, upload-time = "2024-04-20T21:34:42.531Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556, upload-time = "2024-04-20T21:34:40.434Z" }, ] [[package]] @@ -215,51 +252,66 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "wcwidth" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a1/e1/bd15cb8ffdcfeeb2bdc215de3c3cffca11408d829e4b8416dcfe71ba8854/prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab", size = 429087 } +sdist = { url = "https://files.pythonhosted.org/packages/a1/e1/bd15cb8ffdcfeeb2bdc215de3c3cffca11408d829e4b8416dcfe71ba8854/prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab", size = 429087, upload-time = "2025-01-20T15:55:35.072Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e4/ea/d836f008d33151c7a1f62caf3d8dd782e4d15f6a43897f64480c2b8de2ad/prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198", size = 387816 }, + { url = "https://files.pythonhosted.org/packages/e4/ea/d836f008d33151c7a1f62caf3d8dd782e4d15f6a43897f64480c2b8de2ad/prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198", size = 387816, upload-time = "2025-01-20T15:55:29.98Z" }, +] + +[[package]] +name = "psutil" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/80/336820c1ad9286a4ded7e845b2eccfcb27851ab8ac6abece774a6ff4d3de/psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456", size = 497003, upload-time = "2025-02-13T21:54:07.946Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/e6/2d26234410f8b8abdbf891c9da62bee396583f713fb9f3325a4760875d22/psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25", size = 238051, upload-time = "2025-02-13T21:54:12.36Z" }, + { url = "https://files.pythonhosted.org/packages/04/8b/30f930733afe425e3cbfc0e1468a30a18942350c1a8816acfade80c005c4/psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da", size = 239535, upload-time = "2025-02-13T21:54:16.07Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/ed/d362e84620dd22876b55389248e522338ed1bf134a5edd3b8231d7207f6d/psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91", size = 275004, upload-time = "2025-02-13T21:54:18.662Z" }, + { url = "https://files.pythonhosted.org/packages/bf/b9/b0eb3f3cbcb734d930fdf839431606844a825b23eaf9a6ab371edac8162c/psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34", size = 277986, upload-time = "2025-02-13T21:54:21.811Z" }, + { url = "https://files.pythonhosted.org/packages/eb/a2/709e0fe2f093556c17fbafda93ac032257242cabcc7ff3369e2cb76a97aa/psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993", size = 279544, upload-time = "2025-02-13T21:54:24.68Z" }, + { url = "https://files.pythonhosted.org/packages/50/e6/eecf58810b9d12e6427369784efe814a1eec0f492084ce8eb8f4d89d6d61/psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99", size = 241053, upload-time = "2025-02-13T21:54:34.31Z" }, + { url = "https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885, upload-time = "2025-02-13T21:54:37.486Z" }, ] [[package]] name = "ptyprocess" version = "0.7.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762 } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993 }, + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, ] [[package]] name = "pycparser" version = "2.22" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736 } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552 }, + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, ] [[package]] name = "pygments" version = "2.19.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581, upload-time = "2025-01-06T17:26:30.443Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293, upload-time = "2025-01-06T17:26:25.553Z" }, ] [[package]] name = "pyperclip" version = "1.9.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/30/23/2f0a3efc4d6a32f3b63cdff36cd398d9701d26cda58e3ab97ac79fb5e60d/pyperclip-1.9.0.tar.gz", hash = "sha256:b7de0142ddc81bfc5c7507eea19da920b92252b548b96186caf94a5e2527d310", size = 20961 } +sdist = { url = "https://files.pythonhosted.org/packages/30/23/2f0a3efc4d6a32f3b63cdff36cd398d9701d26cda58e3ab97ac79fb5e60d/pyperclip-1.9.0.tar.gz", hash = "sha256:b7de0142ddc81bfc5c7507eea19da920b92252b548b96186caf94a5e2527d310", size = 20961, upload-time = "2024-06-18T20:38:48.401Z" } [[package]] name = "pyreadline3" version = "3.5.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0f/49/4cea918a08f02817aabae639e3d0ac046fef9f9180518a3ad394e22da148/pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7", size = 99839 } +sdist = { url = "https://files.pythonhosted.org/packages/0f/49/4cea918a08f02817aabae639e3d0ac046fef9f9180518a3ad394e22da148/pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7", size = 99839, upload-time = "2024-09-19T02:40:10.062Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6", size = 83178 }, + { url = "https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6", size = 83178, upload-time = 
"2024-09-19T02:40:08.598Z" }, ] [[package]] @@ -272,36 +324,36 @@ dependencies = [ { name = "packaging" }, { name = "pluggy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891 } +sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891, upload-time = "2025-03-02T12:54:54.503Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634 }, + { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634, upload-time = "2025-03-02T12:54:52.069Z" }, ] [[package]] name = "sniffio" version = "1.3.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, ] [[package]] name = "sortedcontainers" version = "2.4.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e8/c4/ba2f8066cceb6f23394729afe52f3bf7adec04bf9ed2c820b39e19299111/sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", size = 30594 } +sdist = { url = "https://files.pythonhosted.org/packages/e8/c4/ba2f8066cceb6f23394729afe52f3bf7adec04bf9ed2c820b39e19299111/sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", size = 30594, upload-time = "2021-05-16T22:03:42.897Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/32/46/9cb0e58b2deb7f82b84065f37f3bffeb12413f947f9388e4cac22c4621ce/sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0", size = 29575 }, + { url = "https://files.pythonhosted.org/packages/32/46/9cb0e58b2deb7f82b84065f37f3bffeb12413f947f9388e4cac22c4621ce/sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = 
"sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0", size = 29575, upload-time = "2021-05-16T22:03:41.177Z" }, ] [[package]] name = "stackscope" version = "0.2.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4a/fc/20dbb993353f31230138f3c63f3f0c881d1853e70d7a30cd68d2ba4cf1e2/stackscope-0.2.2.tar.gz", hash = "sha256:f508c93eb4861ada466dd3ff613ca203962ceb7587ad013759f15394e6a4e619", size = 90479 } +sdist = { url = "https://files.pythonhosted.org/packages/4a/fc/20dbb993353f31230138f3c63f3f0c881d1853e70d7a30cd68d2ba4cf1e2/stackscope-0.2.2.tar.gz", hash = "sha256:f508c93eb4861ada466dd3ff613ca203962ceb7587ad013759f15394e6a4e619", size = 90479, upload-time = "2024-02-27T22:02:15.831Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f1/5f/0a674fcafa03528089badb46419413f342537b5b57d2fefc9900fb8ee4e4/stackscope-0.2.2-py3-none-any.whl", hash = "sha256:c199b0cda738d39c993ee04eb01961b06b7e9aeb43ebf9fd6226cdd72ea9faf6", size = 80807 }, + { url = "https://files.pythonhosted.org/packages/f1/5f/0a674fcafa03528089badb46419413f342537b5b57d2fefc9900fb8ee4e4/stackscope-0.2.2-py3-none-any.whl", hash = "sha256:c199b0cda738d39c993ee04eb01961b06b7e9aeb43ebf9fd6226cdd72ea9faf6", size = 80807, upload-time = "2024-02-27T22:02:13.692Z" }, ] [[package]] @@ -311,9 +363,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pyreadline3", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/73/1a/ed3544579628c5709bae6fae2255e94c6982a9ff77d42d8ba59fd2f3b21a/tabcompleter-1.4.0.tar.gz", hash = "sha256:7562a9938e62f8e7c3be612c3ac4e14c5ec4307b58ba9031c148260e866e8814", size = 10431 } +sdist = { url = "https://files.pythonhosted.org/packages/73/1a/ed3544579628c5709bae6fae2255e94c6982a9ff77d42d8ba59fd2f3b21a/tabcompleter-1.4.0.tar.gz", hash = "sha256:7562a9938e62f8e7c3be612c3ac4e14c5ec4307b58ba9031c148260e866e8814", size = 10431, upload-time = "2024-10-28T00:44:52.665Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl", hash = "sha256:d744aa735b49c0a6cc2fb8fcd40077fec47425e4388301010b14e6ce3311368b", size = 6725 }, + { url = "https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl", hash = "sha256:d744aa735b49c0a6cc2fb8fcd40077fec47425e4388301010b14e6ce3311368b", size = 6725, upload-time = "2024-10-28T00:44:51.267Z" }, ] [[package]] @@ -321,6 +373,8 @@ name = "tractor" version = "0.1.0a6.dev0" source = { editable = "." 
} dependencies = [ + { name = "bidict" }, + { name = "cffi" }, { name = "colorlog" }, { name = "msgspec" }, { name = "pdbp" }, @@ -334,14 +388,18 @@ dev = [ { name = "greenback" }, { name = "pexpect" }, { name = "prompt-toolkit" }, + { name = "psutil" }, { name = "pyperclip" }, { name = "pytest" }, { name = "stackscope" }, + { name = "typing-extensions" }, { name = "xonsh" }, ] [package.metadata] requires-dist = [ + { name = "bidict", specifier = ">=0.23.1" }, + { name = "cffi", specifier = ">=1.17.1" }, { name = "colorlog", specifier = ">=6.8.2,<7" }, { name = "msgspec", specifier = ">=0.19.0" }, { name = "pdbp", specifier = ">=1.6,<2" }, @@ -355,9 +413,11 @@ dev = [ { name = "greenback", specifier = ">=1.2.1,<2" }, { name = "pexpect", specifier = ">=4.9.0,<5" }, { name = "prompt-toolkit", specifier = ">=3.0.50" }, + { name = "psutil", specifier = ">=7.0.0" }, { name = "pyperclip", specifier = ">=1.9.0" }, { name = "pytest", specifier = ">=8.3.5" }, { name = "stackscope", specifier = ">=0.2.2,<0.3" }, + { name = "typing-extensions", specifier = ">=4.14.1" }, { name = "xonsh", specifier = ">=0.19.2" }, ] @@ -368,9 +428,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "trio" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f8/8e/fdd7bc467b40eedd0a5f2ed36b0d692c6e6f2473be00c8160e2e9f53adc1/tricycle-0.4.1.tar.gz", hash = "sha256:f56edb4b3e1bed3e2552b1b499b24a2dab47741e92e9b4d806acc5c35c9e6066", size = 41551 } +sdist = { url = "https://files.pythonhosted.org/packages/f8/8e/fdd7bc467b40eedd0a5f2ed36b0d692c6e6f2473be00c8160e2e9f53adc1/tricycle-0.4.1.tar.gz", hash = "sha256:f56edb4b3e1bed3e2552b1b499b24a2dab47741e92e9b4d806acc5c35c9e6066", size = 41551, upload-time = "2024-02-02T20:41:15.298Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/c6/7cc05d60e21c683df99167db071ce5d848f5063c2a63971a8443466f603e/tricycle-0.4.1-py3-none-any.whl", hash = "sha256:67900995a73e7445e2c70250cdca04a778d9c3923dd960a97ad4569085e0fb3f", size = 35316 }, + { url = "https://files.pythonhosted.org/packages/d7/c6/7cc05d60e21c683df99167db071ce5d848f5063c2a63971a8443466f603e/tricycle-0.4.1-py3-none-any.whl", hash = "sha256:67900995a73e7445e2c70250cdca04a778d9c3923dd960a97ad4569085e0fb3f", size = 35316, upload-time = "2024-02-02T20:41:14.108Z" }, ] [[package]] @@ -385,82 +445,91 @@ dependencies = [ { name = "sniffio" }, { name = "sortedcontainers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a1/47/f62e62a1a6f37909aed0bf8f5d5411e06fa03846cfcb64540cd1180ccc9f/trio-0.29.0.tar.gz", hash = "sha256:ea0d3967159fc130acb6939a0be0e558e364fee26b5deeecc893a6b08c361bdf", size = 588952 } +sdist = { url = "https://files.pythonhosted.org/packages/a1/47/f62e62a1a6f37909aed0bf8f5d5411e06fa03846cfcb64540cd1180ccc9f/trio-0.29.0.tar.gz", hash = "sha256:ea0d3967159fc130acb6939a0be0e558e364fee26b5deeecc893a6b08c361bdf", size = 588952, upload-time = "2025-02-14T07:13:50.724Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c9/55/c4d9bea8b3d7937901958f65124123512419ab0eb73695e5f382521abbfb/trio-0.29.0-py3-none-any.whl", hash = "sha256:d8c463f1a9cc776ff63e331aba44c125f423a5a13c684307e828d930e625ba66", size = 492920 }, + { url = "https://files.pythonhosted.org/packages/c9/55/c4d9bea8b3d7937901958f65124123512419ab0eb73695e5f382521abbfb/trio-0.29.0-py3-none-any.whl", hash = "sha256:d8c463f1a9cc776ff63e331aba44c125f423a5a13c684307e828d930e625ba66", size = 492920, upload-time = "2025-02-14T07:13:48.696Z" }, +] + +[[package]] +name = "typing-extensions" 
+version = "4.14.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/5a/da40306b885cc8c09109dc2e1abd358d5684b1425678151cdaed4731c822/typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36", size = 107673, upload-time = "2025-07-04T13:28:34.16Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/00/d631e67a838026495268c2f6884f3711a15a9a2a96cd244fdaea53b823fb/typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76", size = 43906, upload-time = "2025-07-04T13:28:32.743Z" }, ] [[package]] name = "wcwidth" version = "0.2.13" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301, upload-time = "2024-01-06T02:10:57.829Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166, upload-time = "2024-01-06T02:10:55.763Z" }, ] [[package]] name = "wrapt" version = "1.17.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c3/fc/e91cc220803d7bc4db93fb02facd8461c37364151b8494762cc88b0fbcef/wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3", size = 55531 } +sdist = { url = "https://files.pythonhosted.org/packages/c3/fc/e91cc220803d7bc4db93fb02facd8461c37364151b8494762cc88b0fbcef/wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3", size = 55531, upload-time = "2025-01-14T10:35:45.465Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cd/f7/a2aab2cbc7a665efab072344a8949a71081eed1d2f451f7f7d2b966594a2/wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58", size = 53308 }, - { url = "https://files.pythonhosted.org/packages/50/ff/149aba8365fdacef52b31a258c4dc1c57c79759c335eff0b3316a2664a64/wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda", size = 38488 }, - { url = "https://files.pythonhosted.org/packages/65/46/5a917ce85b5c3b490d35c02bf71aedaa9f2f63f2d15d9949cc4ba56e8ba9/wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438", size = 38776 }, - { url = "https://files.pythonhosted.org/packages/ca/74/336c918d2915a4943501c77566db41d1bd6e9f4dbc317f356b9a244dfe83/wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a", size = 83776 }, - { url = "https://files.pythonhosted.org/packages/09/99/c0c844a5ccde0fe5761d4305485297f91d67cf2a1a824c5f282e661ec7ff/wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000", size = 75420 }, - { url = "https://files.pythonhosted.org/packages/b4/b0/9fc566b0fe08b282c850063591a756057c3247b2362b9286429ec5bf1721/wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6", size = 83199 }, - { url = "https://files.pythonhosted.org/packages/9d/4b/71996e62d543b0a0bd95dda485219856def3347e3e9380cc0d6cf10cfb2f/wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b", size = 82307 }, - { url = "https://files.pythonhosted.org/packages/39/35/0282c0d8789c0dc9bcc738911776c762a701f95cfe113fb8f0b40e45c2b9/wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662", size = 75025 }, - { url = "https://files.pythonhosted.org/packages/4f/6d/90c9fd2c3c6fee181feecb620d95105370198b6b98a0770cba090441a828/wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72", size = 81879 }, - { url = "https://files.pythonhosted.org/packages/8f/fa/9fb6e594f2ce03ef03eddbdb5f4f90acb1452221a5351116c7c4708ac865/wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317", size = 36419 }, - { url = "https://files.pythonhosted.org/packages/47/f8/fb1773491a253cbc123c5d5dc15c86041f746ed30416535f2a8df1f4a392/wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3", size = 38773 }, - { url = "https://files.pythonhosted.org/packages/a1/bd/ab55f849fd1f9a58ed7ea47f5559ff09741b25f00c191231f9f059c83949/wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925", size = 53799 }, - { url = "https://files.pythonhosted.org/packages/53/18/75ddc64c3f63988f5a1d7e10fb204ffe5762bc663f8023f18ecaf31a332e/wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392", size = 38821 }, - { url = "https://files.pythonhosted.org/packages/48/2a/97928387d6ed1c1ebbfd4efc4133a0633546bec8481a2dd5ec961313a1c7/wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40", size = 38919 }, - { url = "https://files.pythonhosted.org/packages/73/54/3bfe5a1febbbccb7a2f77de47b989c0b85ed3a6a41614b104204a788c20e/wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d", size = 88721 }, - { url = "https://files.pythonhosted.org/packages/25/cb/7262bc1b0300b4b64af50c2720ef958c2c1917525238d661c3e9a2b71b7b/wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b", size = 80899 }, - { url = 
"https://files.pythonhosted.org/packages/2a/5a/04cde32b07a7431d4ed0553a76fdb7a61270e78c5fd5a603e190ac389f14/wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98", size = 89222 }, - { url = "https://files.pythonhosted.org/packages/09/28/2e45a4f4771fcfb109e244d5dbe54259e970362a311b67a965555ba65026/wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82", size = 86707 }, - { url = "https://files.pythonhosted.org/packages/c6/d2/dcb56bf5f32fcd4bd9aacc77b50a539abdd5b6536872413fd3f428b21bed/wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae", size = 79685 }, - { url = "https://files.pythonhosted.org/packages/80/4e/eb8b353e36711347893f502ce91c770b0b0929f8f0bed2670a6856e667a9/wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9", size = 87567 }, - { url = "https://files.pythonhosted.org/packages/17/27/4fe749a54e7fae6e7146f1c7d914d28ef599dacd4416566c055564080fe2/wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9", size = 36672 }, - { url = "https://files.pythonhosted.org/packages/15/06/1dbf478ea45c03e78a6a8c4be4fdc3c3bddea5c8de8a93bc971415e47f0f/wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991", size = 38865 }, - { url = "https://files.pythonhosted.org/packages/ce/b9/0ffd557a92f3b11d4c5d5e0c5e4ad057bd9eb8586615cdaf901409920b14/wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125", size = 53800 }, - { url = "https://files.pythonhosted.org/packages/c0/ef/8be90a0b7e73c32e550c73cfb2fa09db62234227ece47b0e80a05073b375/wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998", size = 38824 }, - { url = "https://files.pythonhosted.org/packages/36/89/0aae34c10fe524cce30fe5fc433210376bce94cf74d05b0d68344c8ba46e/wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5", size = 38920 }, - { url = "https://files.pythonhosted.org/packages/3b/24/11c4510de906d77e0cfb5197f1b1445d4fec42c9a39ea853d482698ac681/wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8", size = 88690 }, - { url = "https://files.pythonhosted.org/packages/71/d7/cfcf842291267bf455b3e266c0c29dcb675b5540ee8b50ba1699abf3af45/wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6", size = 80861 }, - { url = "https://files.pythonhosted.org/packages/d5/66/5d973e9f3e7370fd686fb47a9af3319418ed925c27d72ce16b791231576d/wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc", size = 89174 }, - { url = "https://files.pythonhosted.org/packages/a7/d3/8e17bb70f6ae25dabc1aaf990f86824e4fd98ee9cadf197054e068500d27/wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2", size = 86721 }, - { url = "https://files.pythonhosted.org/packages/6f/54/f170dfb278fe1c30d0ff864513cff526d624ab8de3254b20abb9cffedc24/wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b", size = 79763 }, - { url = "https://files.pythonhosted.org/packages/4a/98/de07243751f1c4a9b15c76019250210dd3486ce098c3d80d5f729cba029c/wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504", size = 87585 }, - { url = "https://files.pythonhosted.org/packages/f9/f0/13925f4bd6548013038cdeb11ee2cbd4e37c30f8bfd5db9e5a2a370d6e20/wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a", size = 36676 }, - { url = "https://files.pythonhosted.org/packages/bf/ae/743f16ef8c2e3628df3ddfd652b7d4c555d12c84b53f3d8218498f4ade9b/wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845", size = 38871 }, - { url = "https://files.pythonhosted.org/packages/3d/bc/30f903f891a82d402ffb5fda27ec1d621cc97cb74c16fea0b6141f1d4e87/wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192", size = 56312 }, - { url = "https://files.pythonhosted.org/packages/8a/04/c97273eb491b5f1c918857cd26f314b74fc9b29224521f5b83f872253725/wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b", size = 40062 }, - { url = "https://files.pythonhosted.org/packages/4e/ca/3b7afa1eae3a9e7fefe499db9b96813f41828b9fdb016ee836c4c379dadb/wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0", size = 40155 }, - { url = "https://files.pythonhosted.org/packages/89/be/7c1baed43290775cb9030c774bc53c860db140397047cc49aedaf0a15477/wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306", size = 113471 }, - { url = "https://files.pythonhosted.org/packages/32/98/4ed894cf012b6d6aae5f5cc974006bdeb92f0241775addad3f8cd6ab71c8/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb", size = 101208 }, - { url = "https://files.pythonhosted.org/packages/ea/fd/0c30f2301ca94e655e5e057012e83284ce8c545df7661a78d8bfca2fac7a/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681", size = 109339 }, - { url = "https://files.pythonhosted.org/packages/75/56/05d000de894c4cfcb84bcd6b1df6214297b8089a7bd324c21a4765e49b14/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6", size = 110232 }, - { url = "https://files.pythonhosted.org/packages/53/f8/c3f6b2cf9b9277fb0813418e1503e68414cd036b3b099c823379c9575e6d/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6", size = 100476 }, - { url = 
"https://files.pythonhosted.org/packages/a7/b1/0bb11e29aa5139d90b770ebbfa167267b1fc548d2302c30c8f7572851738/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f", size = 106377 }, - { url = "https://files.pythonhosted.org/packages/6a/e1/0122853035b40b3f333bbb25f1939fc1045e21dd518f7f0922b60c156f7c/wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555", size = 37986 }, - { url = "https://files.pythonhosted.org/packages/09/5e/1655cf481e079c1f22d0cabdd4e51733679932718dc23bf2db175f329b76/wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c", size = 40750 }, - { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594 }, + { url = "https://files.pythonhosted.org/packages/cd/f7/a2aab2cbc7a665efab072344a8949a71081eed1d2f451f7f7d2b966594a2/wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58", size = 53308, upload-time = "2025-01-14T10:33:33.992Z" }, + { url = "https://files.pythonhosted.org/packages/50/ff/149aba8365fdacef52b31a258c4dc1c57c79759c335eff0b3316a2664a64/wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda", size = 38488, upload-time = "2025-01-14T10:33:35.264Z" }, + { url = "https://files.pythonhosted.org/packages/65/46/5a917ce85b5c3b490d35c02bf71aedaa9f2f63f2d15d9949cc4ba56e8ba9/wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438", size = 38776, upload-time = "2025-01-14T10:33:38.28Z" }, + { url = "https://files.pythonhosted.org/packages/ca/74/336c918d2915a4943501c77566db41d1bd6e9f4dbc317f356b9a244dfe83/wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a", size = 83776, upload-time = "2025-01-14T10:33:40.678Z" }, + { url = "https://files.pythonhosted.org/packages/09/99/c0c844a5ccde0fe5761d4305485297f91d67cf2a1a824c5f282e661ec7ff/wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000", size = 75420, upload-time = "2025-01-14T10:33:41.868Z" }, + { url = "https://files.pythonhosted.org/packages/b4/b0/9fc566b0fe08b282c850063591a756057c3247b2362b9286429ec5bf1721/wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6", size = 83199, upload-time = "2025-01-14T10:33:43.598Z" }, + { url = "https://files.pythonhosted.org/packages/9d/4b/71996e62d543b0a0bd95dda485219856def3347e3e9380cc0d6cf10cfb2f/wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b", size = 82307, upload-time = "2025-01-14T10:33:48.499Z" }, + { url = "https://files.pythonhosted.org/packages/39/35/0282c0d8789c0dc9bcc738911776c762a701f95cfe113fb8f0b40e45c2b9/wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662", size = 75025, upload-time = "2025-01-14T10:33:51.191Z" }, + { url = "https://files.pythonhosted.org/packages/4f/6d/90c9fd2c3c6fee181feecb620d95105370198b6b98a0770cba090441a828/wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72", size = 81879, upload-time = "2025-01-14T10:33:52.328Z" }, + { url = "https://files.pythonhosted.org/packages/8f/fa/9fb6e594f2ce03ef03eddbdb5f4f90acb1452221a5351116c7c4708ac865/wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317", size = 36419, upload-time = "2025-01-14T10:33:53.551Z" }, + { url = "https://files.pythonhosted.org/packages/47/f8/fb1773491a253cbc123c5d5dc15c86041f746ed30416535f2a8df1f4a392/wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3", size = 38773, upload-time = "2025-01-14T10:33:56.323Z" }, + { url = "https://files.pythonhosted.org/packages/a1/bd/ab55f849fd1f9a58ed7ea47f5559ff09741b25f00c191231f9f059c83949/wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925", size = 53799, upload-time = "2025-01-14T10:33:57.4Z" }, + { url = "https://files.pythonhosted.org/packages/53/18/75ddc64c3f63988f5a1d7e10fb204ffe5762bc663f8023f18ecaf31a332e/wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392", size = 38821, upload-time = "2025-01-14T10:33:59.334Z" }, + { url = "https://files.pythonhosted.org/packages/48/2a/97928387d6ed1c1ebbfd4efc4133a0633546bec8481a2dd5ec961313a1c7/wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40", size = 38919, upload-time = "2025-01-14T10:34:04.093Z" }, + { url = "https://files.pythonhosted.org/packages/73/54/3bfe5a1febbbccb7a2f77de47b989c0b85ed3a6a41614b104204a788c20e/wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d", size = 88721, upload-time = "2025-01-14T10:34:07.163Z" }, + { url = "https://files.pythonhosted.org/packages/25/cb/7262bc1b0300b4b64af50c2720ef958c2c1917525238d661c3e9a2b71b7b/wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b", size = 80899, upload-time = "2025-01-14T10:34:09.82Z" }, + { url = "https://files.pythonhosted.org/packages/2a/5a/04cde32b07a7431d4ed0553a76fdb7a61270e78c5fd5a603e190ac389f14/wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98", size = 89222, upload-time = "2025-01-14T10:34:11.258Z" }, + { url = "https://files.pythonhosted.org/packages/09/28/2e45a4f4771fcfb109e244d5dbe54259e970362a311b67a965555ba65026/wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82", size = 86707, upload-time = "2025-01-14T10:34:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/c6/d2/dcb56bf5f32fcd4bd9aacc77b50a539abdd5b6536872413fd3f428b21bed/wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae", size = 79685, upload-time = "2025-01-14T10:34:15.043Z" }, + { url = "https://files.pythonhosted.org/packages/80/4e/eb8b353e36711347893f502ce91c770b0b0929f8f0bed2670a6856e667a9/wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9", size = 87567, upload-time = "2025-01-14T10:34:16.563Z" }, + { url = "https://files.pythonhosted.org/packages/17/27/4fe749a54e7fae6e7146f1c7d914d28ef599dacd4416566c055564080fe2/wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9", size = 36672, upload-time = "2025-01-14T10:34:17.727Z" }, + { url = "https://files.pythonhosted.org/packages/15/06/1dbf478ea45c03e78a6a8c4be4fdc3c3bddea5c8de8a93bc971415e47f0f/wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991", size = 38865, upload-time = "2025-01-14T10:34:19.577Z" }, + { url = "https://files.pythonhosted.org/packages/ce/b9/0ffd557a92f3b11d4c5d5e0c5e4ad057bd9eb8586615cdaf901409920b14/wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125", size = 53800, upload-time = "2025-01-14T10:34:21.571Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ef/8be90a0b7e73c32e550c73cfb2fa09db62234227ece47b0e80a05073b375/wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998", size = 38824, upload-time = "2025-01-14T10:34:22.999Z" }, + { url = "https://files.pythonhosted.org/packages/36/89/0aae34c10fe524cce30fe5fc433210376bce94cf74d05b0d68344c8ba46e/wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5", size = 38920, upload-time = "2025-01-14T10:34:25.386Z" }, + { url = "https://files.pythonhosted.org/packages/3b/24/11c4510de906d77e0cfb5197f1b1445d4fec42c9a39ea853d482698ac681/wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8", size = 88690, upload-time = "2025-01-14T10:34:28.058Z" }, + { url = "https://files.pythonhosted.org/packages/71/d7/cfcf842291267bf455b3e266c0c29dcb675b5540ee8b50ba1699abf3af45/wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6", size = 80861, upload-time = "2025-01-14T10:34:29.167Z" }, + { url = "https://files.pythonhosted.org/packages/d5/66/5d973e9f3e7370fd686fb47a9af3319418ed925c27d72ce16b791231576d/wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc", size = 89174, upload-time = "2025-01-14T10:34:31.702Z" }, + { url = "https://files.pythonhosted.org/packages/a7/d3/8e17bb70f6ae25dabc1aaf990f86824e4fd98ee9cadf197054e068500d27/wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2", size = 86721, upload-time = "2025-01-14T10:34:32.91Z" }, + { url = "https://files.pythonhosted.org/packages/6f/54/f170dfb278fe1c30d0ff864513cff526d624ab8de3254b20abb9cffedc24/wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b", size = 79763, upload-time = "2025-01-14T10:34:34.903Z" }, + { url = "https://files.pythonhosted.org/packages/4a/98/de07243751f1c4a9b15c76019250210dd3486ce098c3d80d5f729cba029c/wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504", size = 87585, upload-time = "2025-01-14T10:34:36.13Z" }, + { url = "https://files.pythonhosted.org/packages/f9/f0/13925f4bd6548013038cdeb11ee2cbd4e37c30f8bfd5db9e5a2a370d6e20/wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a", size = 36676, upload-time = "2025-01-14T10:34:37.962Z" }, + { url = "https://files.pythonhosted.org/packages/bf/ae/743f16ef8c2e3628df3ddfd652b7d4c555d12c84b53f3d8218498f4ade9b/wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845", size = 38871, upload-time = "2025-01-14T10:34:39.13Z" }, + { url = "https://files.pythonhosted.org/packages/3d/bc/30f903f891a82d402ffb5fda27ec1d621cc97cb74c16fea0b6141f1d4e87/wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192", size = 56312, upload-time = "2025-01-14T10:34:40.604Z" }, + { url = "https://files.pythonhosted.org/packages/8a/04/c97273eb491b5f1c918857cd26f314b74fc9b29224521f5b83f872253725/wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b", size = 40062, upload-time = "2025-01-14T10:34:45.011Z" }, + { url = "https://files.pythonhosted.org/packages/4e/ca/3b7afa1eae3a9e7fefe499db9b96813f41828b9fdb016ee836c4c379dadb/wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0", size = 40155, upload-time = "2025-01-14T10:34:47.25Z" }, + { url = "https://files.pythonhosted.org/packages/89/be/7c1baed43290775cb9030c774bc53c860db140397047cc49aedaf0a15477/wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306", size = 113471, upload-time = "2025-01-14T10:34:50.934Z" }, + { url = "https://files.pythonhosted.org/packages/32/98/4ed894cf012b6d6aae5f5cc974006bdeb92f0241775addad3f8cd6ab71c8/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb", size = 101208, upload-time = "2025-01-14T10:34:52.297Z" }, + { url = "https://files.pythonhosted.org/packages/ea/fd/0c30f2301ca94e655e5e057012e83284ce8c545df7661a78d8bfca2fac7a/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681", size = 109339, upload-time = "2025-01-14T10:34:53.489Z" }, + { url = "https://files.pythonhosted.org/packages/75/56/05d000de894c4cfcb84bcd6b1df6214297b8089a7bd324c21a4765e49b14/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6", size = 110232, upload-time = "2025-01-14T10:34:55.327Z" }, + { url = "https://files.pythonhosted.org/packages/53/f8/c3f6b2cf9b9277fb0813418e1503e68414cd036b3b099c823379c9575e6d/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = 
"sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6", size = 100476, upload-time = "2025-01-14T10:34:58.055Z" }, + { url = "https://files.pythonhosted.org/packages/a7/b1/0bb11e29aa5139d90b770ebbfa167267b1fc548d2302c30c8f7572851738/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f", size = 106377, upload-time = "2025-01-14T10:34:59.3Z" }, + { url = "https://files.pythonhosted.org/packages/6a/e1/0122853035b40b3f333bbb25f1939fc1045e21dd518f7f0922b60c156f7c/wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555", size = 37986, upload-time = "2025-01-14T10:35:00.498Z" }, + { url = "https://files.pythonhosted.org/packages/09/5e/1655cf481e079c1f22d0cabdd4e51733679932718dc23bf2db175f329b76/wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c", size = 40750, upload-time = "2025-01-14T10:35:03.378Z" }, + { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594, upload-time = "2025-01-14T10:35:44.018Z" }, ] [[package]] name = "xonsh" version = "0.19.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/68/4e/56e95a5e607eb3b0da37396f87cde70588efc8ef819ab16f02d5b8378dc4/xonsh-0.19.2.tar.gz", hash = "sha256:cfdd0680d954a2c3aefd6caddcc7143a3d06aa417ed18365a08219bb71b960b0", size = 799960 } +sdist = { url = "https://files.pythonhosted.org/packages/68/4e/56e95a5e607eb3b0da37396f87cde70588efc8ef819ab16f02d5b8378dc4/xonsh-0.19.2.tar.gz", hash = "sha256:cfdd0680d954a2c3aefd6caddcc7143a3d06aa417ed18365a08219bb71b960b0", size = 799960, upload-time = "2025-02-11T17:10:43.563Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6c/13/281094759df87b23b3c02dc4a16603ab08ea54d7f6acfeb69f3341137c7a/xonsh-0.19.2-py310-none-any.whl", hash = "sha256:ec7f163fd3a4943782aa34069d4e72793328c916a5975949dbec8536cbfc089b", size = 642301 }, - { url = "https://files.pythonhosted.org/packages/29/41/a51e4c3918fe9a293b150cb949b1b8c6d45eb17dfed480dcb76ea43df4e7/xonsh-0.19.2-py311-none-any.whl", hash = "sha256:53c45f7a767901f2f518f9b8dd60fc653e0498e56e89825e1710bb0859985049", size = 642286 }, - { url = "https://files.pythonhosted.org/packages/0a/93/9a77b731f492fac27c577dea2afb5a2bcc2a6a1c79be0c86c95498060270/xonsh-0.19.2-py312-none-any.whl", hash = "sha256:b24c619aa52b59eae4d35c4195dba9b19a2c548fb5c42c6f85f2b8ccb96807b5", size = 642386 }, - { url = "https://files.pythonhosted.org/packages/be/75/070324769c1ff88d971ce040f4f486339be98e0a365c8dd9991eb654265b/xonsh-0.19.2-py313-none-any.whl", hash = "sha256:c53ef6c19f781fbc399ed1b382b5c2aac2125010679a3b61d643978273c27df0", size = 642873 }, - { url = "https://files.pythonhosted.org/packages/fa/cb/2c7ccec54f5b0e73fdf7650e8336582ff0347d9001c5ef8271dc00c034fe/xonsh-0.19.2-py39-none-any.whl", hash = "sha256:bcc0225dc3847f1ed2f175dac6122fbcc54cea67d9c2dc2753d9615e2a5ff284", size = 634602 }, + { url = "https://files.pythonhosted.org/packages/6c/13/281094759df87b23b3c02dc4a16603ab08ea54d7f6acfeb69f3341137c7a/xonsh-0.19.2-py310-none-any.whl", hash = "sha256:ec7f163fd3a4943782aa34069d4e72793328c916a5975949dbec8536cbfc089b", size = 642301, upload-time = "2025-02-11T17:10:39.244Z" }, + { url = 
"https://files.pythonhosted.org/packages/29/41/a51e4c3918fe9a293b150cb949b1b8c6d45eb17dfed480dcb76ea43df4e7/xonsh-0.19.2-py311-none-any.whl", hash = "sha256:53c45f7a767901f2f518f9b8dd60fc653e0498e56e89825e1710bb0859985049", size = 642286, upload-time = "2025-02-11T17:10:41.678Z" }, + { url = "https://files.pythonhosted.org/packages/0a/93/9a77b731f492fac27c577dea2afb5a2bcc2a6a1c79be0c86c95498060270/xonsh-0.19.2-py312-none-any.whl", hash = "sha256:b24c619aa52b59eae4d35c4195dba9b19a2c548fb5c42c6f85f2b8ccb96807b5", size = 642386, upload-time = "2025-02-11T17:10:43.688Z" }, + { url = "https://files.pythonhosted.org/packages/be/75/070324769c1ff88d971ce040f4f486339be98e0a365c8dd9991eb654265b/xonsh-0.19.2-py313-none-any.whl", hash = "sha256:c53ef6c19f781fbc399ed1b382b5c2aac2125010679a3b61d643978273c27df0", size = 642873, upload-time = "2025-02-11T17:10:39.297Z" }, + { url = "https://files.pythonhosted.org/packages/fa/cb/2c7ccec54f5b0e73fdf7650e8336582ff0347d9001c5ef8271dc00c034fe/xonsh-0.19.2-py39-none-any.whl", hash = "sha256:bcc0225dc3847f1ed2f175dac6122fbcc54cea67d9c2dc2753d9615e2a5ff284", size = 634602, upload-time = "2025-02-11T17:10:37.004Z" }, ]