diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 571bd1db..be5cb272 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,46 +8,70 @@ on: workflow_dispatch: jobs: - - mypy: - name: 'MyPy' - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Setup python - uses: actions/setup-python@v2 - with: - python-version: '3.11' - - - name: Install dependencies - run: pip install -U . --upgrade-strategy eager -r requirements-test.txt - - - name: Run MyPy check - run: mypy tractor/ --ignore-missing-imports --show-traceback - + # ------ sdist ------ # test that we can generate a software distribution and install it # thus avoid missing file issues after packaging. + # + # -[x] produce sdist with uv + # ------ - ------ sdist-linux: name: 'sdist' runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 - - name: Setup python - uses: actions/setup-python@v2 - with: - python-version: '3.11' + - name: Install latest uv + uses: astral-sh/setup-uv@v6 - - name: Build sdist - run: python setup.py sdist --formats=zip + - name: Build sdist as tar.gz + run: uv build --sdist --python=3.13 - - name: Install sdist from .zips - run: python -m pip install dist/*.zip + - name: Install sdist from .tar.gz + run: python -m pip install dist/*.tar.gz + + # ------ type-check ------ + # mypy: + # name: 'MyPy' + # runs-on: ubuntu-latest + + # steps: + # - name: Checkout + # uses: actions/checkout@v4 + + # - name: Install latest uv + # uses: astral-sh/setup-uv@v6 + + # # faster due to server caching? + # # https://docs.astral.sh/uv/guides/integration/github/#setting-up-python + # - name: "Set up Python" + # uses: actions/setup-python@v6 + # with: + # python-version-file: "pyproject.toml" + + # # w uv + # # - name: Set up Python + # # run: uv python install + + # - name: Setup uv venv + # run: uv venv .venv --python=3.13 + + # - name: Install + # run: uv sync --dev + + # # TODO, ty cmd over repo + # # - name: type check with ty + # # run: ty ./tractor/ + + # # - uses: actions/cache@v3 + # # name: Cache uv virtenv as default .venv + # # with: + # # path: ./.venv + # # key: venv-${{ hashFiles('uv.lock') }} + + # - name: Run MyPy check + # run: mypy tractor/ --ignore-missing-imports --show-traceback testing-linux: @@ -59,32 +83,45 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python: ['3.11'] + python-version: ['3.13'] spawn_backend: [ 'trio', - 'mp_spawn', - 'mp_forkserver', + # 'mp_spawn', + # 'mp_forkserver', ] steps: - - name: Checkout - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - - name: Setup python - uses: actions/setup-python@v2 + - name: 'Install uv + py-${{ matrix.python-version }}' + uses: astral-sh/setup-uv@v6 with: - python-version: '${{ matrix.python }}' + python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager + # GH way.. faster? + # - name: setup-python@v6 + # uses: actions/setup-python@v6 + # with: + # python-version: '${{ matrix.python-version }}' - - name: List dependencies - run: pip list + # consider caching for speedups? + # https://docs.astral.sh/uv/guides/integration/github/#caching + + - name: Install the project w uv + run: uv sync --all-extras --dev + + # - name: Install dependencies + # run: pip install -U . 
-r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager + + - name: List deps tree + run: uv tree - name: Run tests - run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx + run: uv run pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx + # XXX legacy NOTE XXX + # # We skip 3.10 on windows for now due to not having any collabs to # debug the CI failures. Anyone wanting to hack and solve them is very # welcome, but our primary user base is not using that OS. diff --git a/default.nix b/default.nix new file mode 100644 index 00000000..08e46d06 --- /dev/null +++ b/default.nix @@ -0,0 +1,19 @@ +{ pkgs ? import {} }: +let + nativeBuildInputs = with pkgs; [ + stdenv.cc.cc.lib + uv + ]; + +in +pkgs.mkShell { + inherit nativeBuildInputs; + + LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath nativeBuildInputs; + TMPDIR = "/tmp"; + + shellHook = '' + set -e + uv venv .venv --python=3.12 + ''; +} diff --git a/docs/README.rst b/docs/README.rst index e3bd9f84..cea223ee 100644 --- a/docs/README.rst +++ b/docs/README.rst @@ -1,8 +1,5 @@ |logo| ``tractor``: distributed structurred concurrency -|gh_actions| -|docs| - ``tractor`` is a `structured concurrency`_ (SC), multi-processing_ runtime built on trio_. Fundamentally, ``tractor`` provides parallelism via @@ -66,6 +63,13 @@ Features - (WIP) a ``TaskMngr``: one-cancels-one style nursery supervisor. +Status of `main` / infra +------------------------ + +- |gh_actions| +- |docs| + + Install ------- ``tractor`` is still in a *alpha-near-beta-stage* for many @@ -689,9 +693,11 @@ channel`_! .. _msgspec: https://jcristharif.com/msgspec/ .. _guest: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops - -.. |gh_actions| image:: https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Fgoodboy%2Ftractor%2Fbadge&style=popout-square - :target: https://actions-badge.atrox.dev/goodboy/tractor/goto +.. + NOTE, on generating badge links from the UI + https://docs.github.com/en/actions/how-tos/monitoring-and-troubleshooting-workflows/monitoring-workflows/adding-a-workflow-status-badge?ref=gitguardian-blog-automated-secrets-detection#using-the-ui +.. |gh_actions| image:: https://github.com/goodboy/tractor/actions/workflows/ci.yml/badge.svg?branch=main + :target: https://github.com/goodboy/tractor/actions/workflows/ci.yml .. 
|docs| image:: https://readthedocs.org/projects/tractor/badge/?version=latest :target: https://tractor.readthedocs.io/en/latest/?badge=latest diff --git a/examples/advanced_faults/ipc_failure_during_stream.py b/examples/advanced_faults/ipc_failure_during_stream.py index 950d5a6f..c88e0dfe 100644 --- a/examples/advanced_faults/ipc_failure_during_stream.py +++ b/examples/advanced_faults/ipc_failure_during_stream.py @@ -16,6 +16,7 @@ from tractor import ( ContextCancelled, MsgStream, _testing, + trionics, ) import trio import pytest @@ -62,9 +63,8 @@ async def recv_and_spawn_net_killers( await ctx.started() async with ( ctx.open_stream() as stream, - trio.open_nursery( - strict_exception_groups=False, - ) as tn, + trionics.collapse_eg(), + trio.open_nursery() as tn, ): async for i in stream: print(f'child echoing {i}') @@ -120,6 +120,7 @@ async def main( break_parent_ipc_after: int|bool = False, break_child_ipc_after: int|bool = False, pre_close: bool = False, + tpt_proto: str = 'tcp', ) -> None: @@ -131,6 +132,7 @@ async def main( # a hang since it never engages due to broken IPC debug_mode=debug_mode, loglevel=loglevel, + enable_transports=[tpt_proto], ) as an, ): @@ -145,7 +147,8 @@ async def main( _testing.expect_ctxc( yay=( break_parent_ipc_after - or break_child_ipc_after + or + break_child_ipc_after ), # TODO: we CAN'T remove this right? # since we need the ctxc to bubble up from either diff --git a/examples/debugging/asyncio_bp.py b/examples/debugging/asyncio_bp.py index 296dbccb..fc3b222a 100644 --- a/examples/debugging/asyncio_bp.py +++ b/examples/debugging/asyncio_bp.py @@ -29,7 +29,7 @@ async def bp_then_error( to_trio.send_nowait('start') # NOTE: what happens here inside the hook needs some refinement.. - # => seems like it's still `._debug._set_trace()` but + # => seems like it's still `.debug._set_trace()` but # we set `Lock.local_task_in_debug = 'sync'`, we probably want # some further, at least, meta-data about the task/actor in debug # in terms of making it clear it's `asyncio` mucking about. diff --git a/examples/debugging/restore_builtin_breakpoint.py b/examples/debugging/restore_builtin_breakpoint.py index b591b0f7..06c3bbc4 100644 --- a/examples/debugging/restore_builtin_breakpoint.py +++ b/examples/debugging/restore_builtin_breakpoint.py @@ -4,6 +4,11 @@ import sys import trio import tractor +# ensure mod-path is correct! 
+from tractor.devx.debug import ( + _sync_pause_from_builtin as _sync_pause_from_builtin, +) + async def main() -> None: @@ -13,19 +18,23 @@ async def main() -> None: async with tractor.open_nursery( debug_mode=True, - ) as an: - assert an + loglevel='devx', + maybe_enable_greenback=True, + # ^XXX REQUIRED to enable `breakpoint()` support (from sync + # fns) and thus required here to avoid an assertion err + # on the next line + ): assert ( (pybp_var := os.environ['PYTHONBREAKPOINT']) == - 'tractor.devx._debug._sync_pause_from_builtin' + 'tractor.devx.debug._sync_pause_from_builtin' ) # TODO: an assert that verifies the hook has indeed been, hooked # XD assert ( (pybp_hook := sys.breakpointhook) - is not tractor.devx._debug._set_trace + is not tractor.devx.debug._set_trace ) print( diff --git a/examples/debugging/root_cancelled_but_child_is_in_tty_lock.py b/examples/debugging/root_cancelled_but_child_is_in_tty_lock.py index 16f92b81..72c6de4c 100644 --- a/examples/debugging/root_cancelled_but_child_is_in_tty_lock.py +++ b/examples/debugging/root_cancelled_but_child_is_in_tty_lock.py @@ -24,10 +24,9 @@ async def spawn_until(depth=0): async def main(): - """The main ``tractor`` routine. - - The process tree should look as approximately as follows when the debugger - first engages: + ''' + The process tree should look as approximately as follows when the + debugger first engages: python examples/debugging/multi_nested_subactors_bp_forever.py ├─ python -m tractor._child --uid ('spawner1', '7eab8462 ...) @@ -37,10 +36,11 @@ async def main(): └─ python -m tractor._child --uid ('spawner0', '1d42012b ...) └─ python -m tractor._child --uid ('name_error', '6c2733b8 ...) - """ + ''' async with tractor.open_nursery( debug_mode=True, - loglevel='warning' + loglevel='devx', + enable_transports=['uds'], ) as n: # spawn both actors diff --git a/examples/debugging/root_self_cancelled_w_error.py b/examples/debugging/root_self_cancelled_w_error.py new file mode 100644 index 00000000..b3c15288 --- /dev/null +++ b/examples/debugging/root_self_cancelled_w_error.py @@ -0,0 +1,35 @@ +import trio +import tractor + + +async def main(): + async with tractor.open_root_actor( + debug_mode=True, + loglevel='cancel', + ) as _root: + + # manually trigger self-cancellation and wait + # for it to fully trigger. + _root.cancel_soon() + await _root._cancel_complete.wait() + print('root cancelled') + + # now ensure we can still use the REPL + try: + await tractor.pause() + except trio.Cancelled as _taskc: + assert (root_cs := _root._root_tn.cancel_scope).cancel_called + # NOTE^^ above logic but inside `open_root_actor()` and + # passed to the `shield=` expression is effectively what + # we're testing here! + await tractor.pause(shield=root_cs.cancel_called) + + # XXX, if shield logic *is wrong* inside `open_root_actor()`'s + # crash-handler block this should never be interacted, + # instead `trio.Cancelled` would be bubbled up: the original + # BUG. 
+ assert 0 + + +if __name__ == '__main__': + trio.run(main) diff --git a/examples/debugging/shield_hang_in_sub.py b/examples/debugging/shield_hang_in_sub.py index 5387353f..bf045fe8 100644 --- a/examples/debugging/shield_hang_in_sub.py +++ b/examples/debugging/shield_hang_in_sub.py @@ -37,6 +37,7 @@ async def main( enable_stack_on_sig=True, # maybe_enable_greenback=False, loglevel='devx', + enable_transports=['uds'], ) as an, ): ptl: tractor.Portal = await an.start_actor( diff --git a/examples/debugging/subactor_bp_in_ctx.py b/examples/debugging/subactor_bp_in_ctx.py index 2c5fee8c..f55d2cd4 100644 --- a/examples/debugging/subactor_bp_in_ctx.py +++ b/examples/debugging/subactor_bp_in_ctx.py @@ -33,8 +33,11 @@ async def just_bp( async def main(): + async with tractor.open_nursery( debug_mode=True, + enable_transports=['uds'], + loglevel='devx', ) as n: p = await n.start_actor( 'bp_boi', diff --git a/examples/debugging/sync_bp.py b/examples/debugging/sync_bp.py index 95472c93..a26a9c54 100644 --- a/examples/debugging/sync_bp.py +++ b/examples/debugging/sync_bp.py @@ -6,7 +6,7 @@ import tractor # TODO: only import these when not running from test harness? # can we detect `pexpect` usage maybe? -# from tractor.devx._debug import ( +# from tractor.devx.debug import ( # get_lock, # get_debug_req, # ) diff --git a/examples/quick_cluster.py b/examples/quick_cluster.py index 2378a3cf..3fa4ca2a 100644 --- a/examples/quick_cluster.py +++ b/examples/quick_cluster.py @@ -23,9 +23,8 @@ async def main(): modules=[__name__] ) as portal_map, - trio.open_nursery( - strict_exception_groups=False, - ) as tn, + tractor.trionics.collapse_eg(), + trio.open_nursery() as tn, ): for (name, portal) in portal_map.items(): diff --git a/examples/service_discovery.py b/examples/service_discovery.py index a0f37b88..1219f0c1 100644 --- a/examples/service_discovery.py +++ b/examples/service_discovery.py @@ -9,7 +9,7 @@ async def main(service_name): async with tractor.open_nursery() as an: await an.start_actor(service_name) - async with tractor.get_registry('127.0.0.1', 1616) as portal: + async with tractor.get_registry() as portal: print(f"Arbiter is listening on {portal.channel}") async with tractor.wait_for_actor(service_name) as sockaddr: diff --git a/pyproject.toml b/pyproject.toml index b3e9e100..a0491598 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,6 +45,8 @@ dependencies = [ "pdbp>=1.6,<2", # windows only (from `pdbp`) # typed IPC msging "msgspec>=0.19.0", + "cffi>=1.17.1", + "bidict>=0.23.1", ] # ------ project ------ @@ -59,9 +61,13 @@ dev = [ # `tractor.devx` tooling "greenback>=1.2.1,<2", "stackscope>=0.2.2,<0.3", + # ^ requires this? + "typing-extensions>=4.14.1", + "pyperclip>=1.9.0", "prompt-toolkit>=3.0.50", "xonsh>=0.19.2", + "psutil>=7.0.0", ] # TODO, add these with sane versions; were originally in # `requirements-docs.txt`.. diff --git a/tests/conftest.py b/tests/conftest.py index 674767ff..b84f4105 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,24 +1,27 @@ """ -``tractor`` testing!! +Top level of the testing suites! + """ +from __future__ import annotations import sys import subprocess import os -import random import signal import platform import time import pytest -import tractor from tractor._testing import ( examples_dir as examples_dir, tractor_test as tractor_test, expect_ctxc as expect_ctxc, ) -# TODO: include wtv plugin(s) we build in `._testing.pytest`? 
-pytest_plugins = ['pytester'] +pytest_plugins: list[str] = [ + 'pytester', + 'tractor._testing.pytest', +] + # Sending signal.SIGINT on subprocess fails on windows. Use CTRL_* alternatives if platform.system() == 'Windows': @@ -30,7 +33,11 @@ else: _KILL_SIGNAL = signal.SIGKILL _INT_SIGNAL = signal.SIGINT _INT_RETURN_CODE = 1 if sys.version_info < (3, 8) else -signal.SIGINT.value - _PROC_SPAWN_WAIT = 0.6 if sys.version_info < (3, 7) else 0.4 + _PROC_SPAWN_WAIT = ( + 0.6 + if sys.version_info < (3, 7) + else 0.4 + ) no_windows = pytest.mark.skipif( @@ -39,7 +46,12 @@ no_windows = pytest.mark.skipif( ) -def pytest_addoption(parser): +def pytest_addoption( + parser: pytest.Parser, +): + # ?TODO? should this be exposed from our `._testing.pytest` + # plugin or should we make it more explicit with `--tl` for + # tractor logging like we do in other client projects? parser.addoption( "--ll", action="store", @@ -47,42 +59,10 @@ def pytest_addoption(parser): default='ERROR', help="logging level to set when testing" ) - parser.addoption( - "--spawn-backend", - action="store", - dest='spawn_backend', - default='trio', - help="Processing spawning backend to use for test run", - ) - - parser.addoption( - "--tpdb", "--debug-mode", - action="store_true", - dest='tractor_debug_mode', - # default=False, - help=( - 'Enable a flag that can be used by tests to to set the ' - '`debug_mode: bool` for engaging the internal ' - 'multi-proc debugger sys.' - ), - ) - - -def pytest_configure(config): - backend = config.option.spawn_backend - tractor._spawn.try_set_start_method(backend) - - -@pytest.fixture(scope='session') -def debug_mode(request): - debug_mode: bool = request.config.option.tractor_debug_mode - # if debug_mode: - # breakpoint() - return debug_mode - @pytest.fixture(scope='session', autouse=True) def loglevel(request): + import tractor orig = tractor.log._default_loglevel level = tractor.log._default_loglevel = request.config.option.loglevel tractor.log.get_console_log(level) @@ -90,106 +70,44 @@ def loglevel(request): tractor.log._default_loglevel = orig -@pytest.fixture(scope='session') -def spawn_backend(request) -> str: - return request.config.option.spawn_backend - - -# @pytest.fixture(scope='function', autouse=True) -# def debug_enabled(request) -> str: -# from tractor import _state -# if _state._runtime_vars['_debug_mode']: -# breakpoint() - _ci_env: bool = os.environ.get('CI', False) @pytest.fixture(scope='session') def ci_env() -> bool: ''' - Detect CI envoirment. + Detect CI environment. ''' return _ci_env -# TODO: also move this to `._testing` for now? -# -[ ] possibly generalize and re-use for multi-tree spawning -# along with the new stuff for multi-addrs in distribute_dis -# branch? -# -# choose randomly at import time -_reg_addr: tuple[str, int] = ( - '127.0.0.1', - random.randint(1000, 9999), -) - - -@pytest.fixture(scope='session') -def reg_addr() -> tuple[str, int]: - - # globally override the runtime to the per-test-session-dynamic - # addr so that all tests never conflict with any other actor - # tree using the default. - from tractor import _root - _root._default_lo_addrs = [_reg_addr] - - return _reg_addr - - -def pytest_generate_tests(metafunc): - spawn_backend = metafunc.config.option.spawn_backend - - if not spawn_backend: - # XXX some weird windows bug with `pytest`? - spawn_backend = 'trio' - - # TODO: maybe just use the literal `._spawn.SpawnMethodKey`? 
- assert spawn_backend in ( - 'mp_spawn', - 'mp_forkserver', - 'trio', - ) - - # NOTE: used to be used to dyanmically parametrize tests for when - # you just passed --spawn-backend=`mp` on the cli, but now we expect - # that cli input to be manually specified, BUT, maybe we'll do - # something like this again in the future? - if 'start_method' in metafunc.fixturenames: - metafunc.parametrize("start_method", [spawn_backend], scope='module') - - -# TODO: a way to let test scripts (like from `examples/`) -# guarantee they won't registry addr collide! -# @pytest.fixture -# def open_test_runtime( -# reg_addr: tuple, -# ) -> AsyncContextManager: -# return partial( -# tractor.open_nursery, -# registry_addrs=[reg_addr], -# ) - - -def sig_prog(proc, sig): +def sig_prog( + proc: subprocess.Popen, + sig: int, + canc_timeout: float = 0.1, +) -> int: "Kill the actor-process with ``sig``." proc.send_signal(sig) - time.sleep(0.1) + time.sleep(canc_timeout) if not proc.poll(): # TODO: why sometimes does SIGINT not work on teardown? # seems to happen only when trace logging enabled? proc.send_signal(_KILL_SIGNAL) - ret = proc.wait() + ret: int = proc.wait() assert ret # TODO: factor into @cm and move to `._testing`? @pytest.fixture def daemon( + debug_mode: bool, loglevel: str, - testdir, + testdir: pytest.Pytester, reg_addr: tuple[str, int], -): + tpt_proto: str, + +) -> subprocess.Popen: ''' Run a daemon root actor as a separate actor-process tree and "remote registrar" for discovery-protocol related tests. @@ -200,28 +118,100 @@ def daemon( loglevel: str = 'info' code: str = ( - "import tractor; " - "tractor.run_daemon([], registry_addrs={reg_addrs}, loglevel={ll})" + "import tractor; " + "tractor.run_daemon([], " + "registry_addrs={reg_addrs}, " + "debug_mode={debug_mode}, " + "loglevel={ll})" ).format( reg_addrs=str([reg_addr]), ll="'{}'".format(loglevel) if loglevel else None, + debug_mode=debug_mode, ) cmd: list[str] = [ sys.executable, '-c', code, ] + # breakpoint() kwargs = {} if platform.system() == 'Windows': # without this, tests hang on windows forever kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP - proc = testdir.popen( + proc: subprocess.Popen = testdir.popen( cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, **kwargs, ) - assert not proc.returncode + + # UDS sockets are **really** fast to bind()/listen()/connect() + # so it's often required that we delay a bit more starting + # the first actor-tree.. + if tpt_proto == 'uds': + global _PROC_SPAWN_WAIT + _PROC_SPAWN_WAIT = 0.6 + time.sleep(_PROC_SPAWN_WAIT) + + assert not proc.returncode yield proc sig_prog(proc, _INT_SIGNAL) + + # XXX! yeah.. just be reaaal careful with this bc sometimes it + # can lock up on the `_io.BufferedReader` and hang.. + stderr: str = proc.stderr.read().decode() + if stderr: + print( + f'Daemon actor tree produced STDERR:\n' + f'{proc.args}\n' + f'\n' + f'{stderr}\n' + ) + if proc.returncode != -2: + raise RuntimeError( + 'Daemon actor tree failed !?\n' + f'{proc.args}\n' + ) + + +# @pytest.fixture(autouse=True) +# def shared_last_failed(pytestconfig): +# val = pytestconfig.cache.get("example/value", None) +# breakpoint() +# if val is None: +# pytestconfig.cache.set("example/value", val) +# return val + + +# TODO: a way to let test scripts (like from `examples/`) +# guarantee they won't `registry_addrs` collide! +# -[ ] maybe use some kinda standard `def main()` arg-spec that +# we can introspect from a fixture that is called from the test +# body? 
+# -[ ] test and figure out typing for below prototype! Bp +# +# @pytest.fixture +# def set_script_runtime_args( +# reg_addr: tuple, +# ) -> Callable[[...], None]: + +# def import_n_partial_in_args_n_triorun( +# script: Path, # under examples? +# **runtime_args, +# ) -> Callable[[], Any]: # a `partial`-ed equiv of `trio.run()` + +# # NOTE, below is taken from +# # `.test_advanced_faults.test_ipc_channel_break_during_stream` +# mod: ModuleType = import_path( +# examples_dir() / 'advanced_faults' +# / 'ipc_failure_during_stream.py', +# root=examples_dir(), +# consider_namespace_packages=False, +# ) +# return partial( +# trio.run, +# partial( +# mod.main, +# **runtime_args, +# ) +# ) +# return import_n_partial_in_args_n_triorun diff --git a/tests/devx/conftest.py b/tests/devx/conftest.py index c45265dc..9a5c90a5 100644 --- a/tests/devx/conftest.py +++ b/tests/devx/conftest.py @@ -2,9 +2,11 @@ `tractor.devx.*` tooling sub-pkg test space. ''' +from __future__ import annotations import time from typing import ( Callable, + TYPE_CHECKING, ) import pytest @@ -16,7 +18,7 @@ from pexpect.spawnbase import SpawnBase from tractor._testing import ( mk_cmd, ) -from tractor.devx._debug import ( +from tractor.devx.debug import ( _pause_msg as _pause_msg, _crash_msg as _crash_msg, _repl_fail_msg as _repl_fail_msg, @@ -26,14 +28,22 @@ from ..conftest import ( _ci_env, ) +if TYPE_CHECKING: + from pexpect import pty_spawn + + +# a fn that sub-instantiates a `pexpect.spawn()` +# and returns it. +type PexpectSpawner = Callable[[str], pty_spawn.spawn] + @pytest.fixture def spawn( - start_method, + start_method: str, testdir: pytest.Pytester, reg_addr: tuple[str, int], -) -> Callable[[str], None]: +) -> PexpectSpawner: ''' Use the `pexpect` module shipped via `testdir.spawn()` to run an `./examples/..` script by name. @@ -59,7 +69,7 @@ def spawn( def _spawn( cmd: str, **mkcmd_kwargs, - ): + ) -> pty_spawn.spawn: unset_colors() return testdir.spawn( cmd=mk_cmd( @@ -73,7 +83,7 @@ def spawn( ) # such that test-dep can pass input script name. - return _spawn + return _spawn # the `PexpectSpawner`, type alias. @pytest.fixture( @@ -111,7 +121,7 @@ def ctlc( # XXX: disable pygments highlighting for auto-tests # since some envs (like actions CI) will struggle # the the added color-char encoding.. - from tractor.devx._debug import TractorConfig + from tractor.devx.debug import TractorConfig TractorConfig.use_pygements = False yield use_ctlc diff --git a/tests/devx/test_debugger.py b/tests/devx/test_debugger.py index 171e983e..cacab803 100644 --- a/tests/devx/test_debugger.py +++ b/tests/devx/test_debugger.py @@ -1,19 +1,23 @@ """ That "native" debug mode better work! -All these tests can be understood (somewhat) by running the equivalent -`examples/debugging/` scripts manually. +All these tests can be understood (somewhat) by running the +equivalent `examples/debugging/` scripts manually. TODO: - - none of these tests have been run successfully on windows yet but - there's been manual testing that verified it works. - - wonder if any of it'll work on OS X? + - none of these tests have been run successfully on windows yet but + there's been manual testing that verified it works. + - wonder if any of it'll work on OS X? 
""" +from __future__ import annotations from functools import partial import itertools import platform import time +from typing import ( + TYPE_CHECKING, +) import pytest from pexpect.exceptions import ( @@ -34,6 +38,9 @@ from .conftest import ( assert_before, ) +if TYPE_CHECKING: + from ..conftest import PexpectSpawner + # TODO: The next great debugger audit could be done by you! # - recurrent entry to breakpoint() from single actor *after* and an # error in another task? @@ -310,7 +317,6 @@ def test_subactor_breakpoint( assert in_prompt_msg( child, [ - 'MessagingError:', 'RemoteActorError:', "('breakpoint_forever'", 'bdb.BdbQuit', @@ -528,7 +534,7 @@ def test_multi_daemon_subactors( # now the root actor won't clobber the bp_forever child # during it's first access to the debug lock, but will instead # wait for the lock to release, by the edge triggered - # ``devx._debug.Lock.no_remote_has_tty`` event before sending cancel messages + # ``devx.debug.Lock.no_remote_has_tty`` event before sending cancel messages # (via portals) to its underlings B) # at some point here there should have been some warning msg from @@ -919,6 +925,7 @@ def test_post_mortem_api( " bool: + ''' + Set pre/post-REPL state vars and bypass actual conole + interaction. + + ''' + nonlocal repl_acquired, repl_released + + # task: trio.Task = trio.lowlevel.current_task() + # print(f'pre-REPL active_task={task.name}') + + print('pre-REPL') + repl_acquired = True + yield False # never actually .interact() + print('post-REPL') + repl_released = True + + try: + # TODO, with runtime's `debug_mode` setting + # -[ ] need to open runtime tho obvi.. + # + # with tractor.devx.maybe_open_crash_handler( + # pdb=True, + + with tractor.devx.open_crash_handler( + raise_on_exit=raise_on_exit, + repl_fixture=block_repl_ux + ) as bxerr: + if to_raise is not None: + raise to_raise + + except Exception as _exc: + exc = _exc + if ( + raise_on_exit is True + or + type(to_raise) in raise_on_exit + ): + assert ( + exc + is + to_raise + is + bxerr.value + ) + + else: + raise + else: + assert ( + to_raise is None + or + not raise_on_exit + or + type(to_raise) not in raise_on_exit + ) + assert bxerr.value is to_raise + + assert bxerr.raise_on_exit == raise_on_exit + + if to_raise is not None: + assert repl_acquired + assert repl_released diff --git a/tests/ipc/__init__.py b/tests/ipc/__init__.py new file mode 100644 index 00000000..b8b1f156 --- /dev/null +++ b/tests/ipc/__init__.py @@ -0,0 +1,4 @@ +''' +`tractor.ipc` subsystem(s)/unit testing suites. + +''' diff --git a/tests/ipc/test_each_tpt.py b/tests/ipc/test_each_tpt.py new file mode 100644 index 00000000..9ed45789 --- /dev/null +++ b/tests/ipc/test_each_tpt.py @@ -0,0 +1,114 @@ +''' +Unit-ish tests for specific IPC transport protocol backends. + +''' +from __future__ import annotations +from pathlib import Path + +import pytest +import trio +import tractor +from tractor import ( + Actor, + _state, + _addr, +) + + +@pytest.fixture +def bindspace_dir_str() -> str: + + rt_dir: Path = tractor._state.get_rt_dir() + bs_dir: Path = rt_dir / 'doggy' + bs_dir_str: str = str(bs_dir) + assert not bs_dir.is_dir() + + yield bs_dir_str + + # delete it on suite teardown. + # ?TODO? should we support this internally + # or is leaking it ok? 
+ if bs_dir.is_dir(): + bs_dir.rmdir() + + +def test_uds_bindspace_created_implicitly( + debug_mode: bool, + bindspace_dir_str: str, +): + registry_addr: tuple = ( + f'{bindspace_dir_str}', + 'registry@doggy.sock', + ) + bs_dir_str: str = registry_addr[0] + + # XXX, ensure bindspace-dir DNE beforehand! + assert not Path(bs_dir_str).is_dir() + + async def main(): + async with tractor.open_nursery( + enable_transports=['uds'], + registry_addrs=[registry_addr], + debug_mode=debug_mode, + ) as _an: + + # XXX MUST be created implicitly by + # `.ipc._uds.start_listener()`! + assert Path(bs_dir_str).is_dir() + + root: Actor = tractor.current_actor() + assert root.is_registrar + + assert registry_addr in root.reg_addrs + assert ( + registry_addr + in + _state._runtime_vars['_registry_addrs'] + ) + assert ( + _addr.wrap_address(registry_addr) + in + root.registry_addrs + ) + + trio.run(main) + + +def test_uds_double_listen_raises_connerr( + debug_mode: bool, + bindspace_dir_str: str, +): + registry_addr: tuple = ( + f'{bindspace_dir_str}', + 'registry@doggy.sock', + ) + + async def main(): + async with tractor.open_nursery( + enable_transports=['uds'], + registry_addrs=[registry_addr], + debug_mode=debug_mode, + ) as _an: + + # runtime up + root: Actor = tractor.current_actor() + + from tractor.ipc._uds import ( + start_listener, + UDSAddress, + ) + ya_bound_addr: UDSAddress = root.registry_addrs[0] + try: + await start_listener( + addr=ya_bound_addr, + ) + except ConnectionError as connerr: + assert type(src_exc := connerr.__context__) is OSError + assert 'Address already in use' in src_exc.args + # complete, exit test. + + else: + pytest.fail('It dint raise a connerr !?') + + + trio.run(main) diff --git a/tests/ipc/test_multi_tpt.py b/tests/ipc/test_multi_tpt.py new file mode 100644 index 00000000..353385e1 --- /dev/null +++ b/tests/ipc/test_multi_tpt.py @@ -0,0 +1,95 @@ +''' +Verify the `enable_transports` param drives various +per-root/sub-actor IPC endpoint/server settings. + +''' +from __future__ import annotations + +import pytest +import trio +import tractor +from tractor import ( + Actor, + Portal, + ipc, + msg, + _state, + _addr, +) + +@tractor.context +async def chk_tpts( + ctx: tractor.Context, + tpt_proto_key: str, +): + rtvars = _state._runtime_vars + assert ( + tpt_proto_key + in + rtvars['_enable_tpts'] + ) + actor: Actor = tractor.current_actor() + spec: msg.types.SpawnSpec = actor._spawn_spec + assert spec._runtime_vars == rtvars + + # ensure individual IPC ep-addr types + serv: ipc._server.Server = actor.ipc_server + addr: ipc._types.Address + for addr in serv.addrs: + assert addr.proto_key == tpt_proto_key + + # Actor delegate-props enforcement + assert ( + actor.accept_addrs + == + serv.accept_addrs + ) + + await ctx.started(serv.accept_addrs) + + +# TODO, parametrize over mis-matched-proto-typed `registry_addrs` +# since i seems to work in `piker` but not exactly sure if both tcp +# & uds are being deployed then? 
+# +@pytest.mark.parametrize( + 'tpt_proto_key', + ['tcp', 'uds'], + ids=lambda item: f'ipc_tpt={item!r}' +) +def test_root_passes_tpt_to_sub( + tpt_proto_key: str, + reg_addr: tuple, + debug_mode: bool, +): + async def main(): + async with tractor.open_nursery( + enable_transports=[tpt_proto_key], + registry_addrs=[reg_addr], + debug_mode=debug_mode, + ) as an: + + assert ( + tpt_proto_key + in + _state._runtime_vars['_enable_tpts'] + ) + + ptl: Portal = await an.start_actor( + name='sub', + enable_modules=[__name__], + ) + async with ptl.open_context( + chk_tpts, + tpt_proto_key=tpt_proto_key, + ) as (ctx, accept_addrs): + + uw_addr: tuple + for uw_addr in accept_addrs: + addr = _addr.wrap_address(uw_addr) + assert addr.is_valid + + # shudown sub-actor(s) + await an.cancel() + + trio.run(main) diff --git a/tests/ipc/test_server.py b/tests/ipc/test_server.py new file mode 100644 index 00000000..1d63bd1b --- /dev/null +++ b/tests/ipc/test_server.py @@ -0,0 +1,72 @@ +''' +High-level `.ipc._server` unit tests. + +''' +from __future__ import annotations + +import pytest +import trio +from tractor import ( + devx, + ipc, + log, +) +from tractor._testing.addr import ( + get_rando_addr, +) +# TODO, use/check-roundtripping with some of these wrapper types? +# +# from .._addr import Address +# from ._chan import Channel +# from ._transport import MsgTransport +# from ._uds import UDSAddress +# from ._tcp import TCPAddress + + +@pytest.mark.parametrize( + '_tpt_proto', + ['uds', 'tcp'] +) +def test_basic_ipc_server( + _tpt_proto: str, + debug_mode: bool, + loglevel: str, +): + + # so we see the socket-listener reporting on console + log.get_console_log("INFO") + + rando_addr: tuple = get_rando_addr( + tpt_proto=_tpt_proto, + ) + async def main(): + async with ipc._server.open_ipc_server() as server: + + assert ( + server._parent_tn + and + server._parent_tn is server._stream_handler_tn + ) + assert server._no_more_peers.is_set() + + eps: list[ipc._server.Endpoint] = await server.listen_on( + accept_addrs=[rando_addr], + stream_handler_nursery=None, + ) + assert ( + len(eps) == 1 + and + (ep := eps[0])._listener + and + not ep.peer_tpts + ) + + server._parent_tn.cancel_scope.cancel() + + # !TODO! actually make a bg-task connection from a client + # using `ipc._chan._connect_chan()` + + with devx.maybe_open_crash_handler( + pdb=debug_mode, + ): + trio.run(main) diff --git a/tests/test_advanced_faults.py b/tests/test_advanced_faults.py index de8a0e1c..061ae5aa 100644 --- a/tests/test_advanced_faults.py +++ b/tests/test_advanced_faults.py @@ -10,6 +10,9 @@ import pytest from _pytest.pathlib import import_path import trio import tractor +from tractor import ( + TransportClosed, +) from tractor._testing import ( examples_dir, break_ipc, @@ -74,6 +77,7 @@ def test_ipc_channel_break_during_stream( spawn_backend: str, ipc_break: dict|None, pre_aclose_msgstream: bool, + tpt_proto: str, ): ''' Ensure we can have an IPC channel break its connection during @@ -91,7 +95,7 @@ def test_ipc_channel_break_during_stream( # non-`trio` spawners should never hit the hang condition that # requires the user to do ctl-c to cancel the actor tree. # expect_final_exc = trio.ClosedResourceError - expect_final_exc = tractor.TransportClosed + expect_final_exc = TransportClosed mod: ModuleType = import_path( examples_dir() / 'advanced_faults' @@ -104,6 +108,8 @@ def test_ipc_channel_break_during_stream( # period" wherein the user eventually hits ctl-c to kill the # root-actor tree. 
expect_final_exc: BaseException = KeyboardInterrupt + expect_final_cause: BaseException|None = None + if ( # only expect EoC if trans is broken on the child side, ipc_break['break_child_ipc_after'] is not False @@ -138,6 +144,9 @@ def test_ipc_channel_break_during_stream( # a user sending ctl-c by raising a KBI. if pre_aclose_msgstream: expect_final_exc = KeyboardInterrupt + if tpt_proto == 'uds': + expect_final_exc = TransportClosed + expect_final_cause = trio.BrokenResourceError # XXX OLD XXX # if child calls `MsgStream.aclose()` then expect EoC. @@ -157,6 +166,10 @@ def test_ipc_channel_break_during_stream( if pre_aclose_msgstream: expect_final_exc = KeyboardInterrupt + if tpt_proto == 'uds': + expect_final_exc = TransportClosed + expect_final_cause = trio.BrokenResourceError + # NOTE when the parent IPC side dies (even if the child does as well # but the child fails BEFORE the parent) we always expect the # IPC layer to raise a closed-resource, NEVER do we expect @@ -169,8 +182,8 @@ def test_ipc_channel_break_during_stream( and ipc_break['break_child_ipc_after'] is False ): - # expect_final_exc = trio.ClosedResourceError expect_final_exc = tractor.TransportClosed + expect_final_cause = trio.ClosedResourceError # BOTH but, PARENT breaks FIRST elif ( @@ -181,8 +194,8 @@ def test_ipc_channel_break_during_stream( ipc_break['break_parent_ipc_after'] ) ): - # expect_final_exc = trio.ClosedResourceError expect_final_exc = tractor.TransportClosed + expect_final_cause = trio.ClosedResourceError with pytest.raises( expected_exception=( @@ -198,6 +211,7 @@ def test_ipc_channel_break_during_stream( start_method=spawn_backend, loglevel=loglevel, pre_close=pre_aclose_msgstream, + tpt_proto=tpt_proto, **ipc_break, ) ) @@ -220,10 +234,15 @@ def test_ipc_channel_break_during_stream( ) cause: Exception = tc.__cause__ assert ( - type(cause) is trio.ClosedResourceError - and - cause.args[0] == 'another task closed this fd' + # type(cause) is trio.ClosedResourceError + type(cause) is expect_final_cause + + # TODO, should we expect a certain exc-message (per + # tpt) as well?? + # and + # cause.args[0] == 'another task closed this fd' ) + raise # get raw instance from pytest wrapper diff --git a/tests/test_advanced_streaming.py b/tests/test_advanced_streaming.py index 64f24167..907a2196 100644 --- a/tests/test_advanced_streaming.py +++ b/tests/test_advanced_streaming.py @@ -313,9 +313,8 @@ async def inf_streamer( # `trio.EndOfChannel` doesn't propagate directly to the above # .open_stream() parent, resulting in it also raising instead # of gracefully absorbing as normal.. so how to handle? 
- trio.open_nursery( - strict_exception_groups=False, - ) as tn, + tractor.trionics.collapse_eg(), + trio.open_nursery() as tn, ): async def close_stream_on_sentinel(): async for msg in stream: diff --git a/tests/test_cancellation.py b/tests/test_cancellation.py index ca14ae4b..27fd59d7 100644 --- a/tests/test_cancellation.py +++ b/tests/test_cancellation.py @@ -236,7 +236,10 @@ async def stream_forever(): async def test_cancel_infinite_streamer(start_method): # stream for at most 1 seconds - with trio.move_on_after(1) as cancel_scope: + with ( + trio.fail_after(4), + trio.move_on_after(1) as cancel_scope + ): async with tractor.open_nursery() as n: portal = await n.start_actor( 'donny', @@ -284,20 +287,32 @@ async def test_cancel_infinite_streamer(start_method): ], ) @tractor_test -async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel): - """Verify a subset of failed subactors causes all others in +async def test_some_cancels_all( + num_actors_and_errs: tuple, + start_method: str, + loglevel: str, +): + ''' + Verify a subset of failed subactors causes all others in the nursery to be cancelled just like the strategy in trio. This is the first and only supervisory strategy at the moment. - """ - num_actors, first_err, err_type, ria_func, da_func = num_actors_and_errs + + ''' + ( + num_actors, + first_err, + err_type, + ria_func, + da_func, + ) = num_actors_and_errs try: - async with tractor.open_nursery() as n: + async with tractor.open_nursery() as an: # spawn the same number of deamon actors which should be cancelled dactor_portals = [] for i in range(num_actors): - dactor_portals.append(await n.start_actor( + dactor_portals.append(await an.start_actor( f'deamon_{i}', enable_modules=[__name__], )) @@ -307,7 +322,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel): for i in range(num_actors): # start actor(s) that will fail immediately riactor_portals.append( - await n.run_in_actor( + await an.run_in_actor( func, name=f'actor_{i}', **kwargs @@ -337,7 +352,8 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel): # should error here with a ``RemoteActorError`` or ``MultiError`` - except first_err as err: + except first_err as _err: + err = _err if isinstance(err, BaseExceptionGroup): assert len(err.exceptions) == num_actors for exc in err.exceptions: @@ -348,8 +364,8 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel): elif isinstance(err, tractor.RemoteActorError): assert err.boxed_type == err_type - assert n.cancelled is True - assert not n._children + assert an.cancelled is True + assert not an._children else: pytest.fail("Should have gotten a remote assertion error?") @@ -519,10 +535,15 @@ def test_cancel_via_SIGINT_other_task( async def main(): # should never timeout since SIGINT should cancel the current program with trio.fail_after(timeout): - async with trio.open_nursery( - strict_exception_groups=False, - ) as n: - await n.start(spawn_and_sleep_forever) + async with ( + + # XXX ?TODO? why no work!? + # tractor.trionics.collapse_eg(), + trio.open_nursery( + strict_exception_groups=False, + ) as tn, + ): + await tn.start(spawn_and_sleep_forever) if 'mp' in spawn_backend: time.sleep(0.1) os.kill(pid, signal.SIGINT) @@ -533,38 +554,123 @@ def test_cancel_via_SIGINT_other_task( async def spin_for(period=3): "Sync sleep." 
+ print(f'sync sleeping in sub-sub for {period}\n') time.sleep(period) -async def spawn(): - async with tractor.open_nursery() as tn: - await tn.run_in_actor( +async def spawn_sub_with_sync_blocking_task(): + async with tractor.open_nursery() as an: + print('starting sync blocking subactor..\n') + await an.run_in_actor( spin_for, name='sleeper', ) + print('exiting first subactor layer..\n') +@pytest.mark.parametrize( + 'man_cancel_outer', + [ + False, # passes if delay != 2 + + # always causes an unexpected eg-w-embedded-assert-err? + pytest.param(True, + marks=pytest.mark.xfail( + reason=( + 'always causes an unexpected eg-w-embedded-assert-err?' + ) + ), + ), + ], +) @no_windows def test_cancel_while_childs_child_in_sync_sleep( - loglevel, - start_method, - spawn_backend, + loglevel: str, + start_method: str, + spawn_backend: str, + debug_mode: bool, + reg_addr: tuple, + man_cancel_outer: bool, ): - """Verify that a child cancelled while executing sync code is torn + ''' + Verify that a child cancelled while executing sync code is torn down even when that cancellation is triggered by the parent 2 nurseries "up". - """ + + Though the grandchild should stay blocking its actor runtime, its + parent should issue a "zombie reaper" to hard kill it after + sufficient timeout. + + ''' if start_method == 'forkserver': pytest.skip("Forksever sux hard at resuming from sync sleep...") async def main(): - with trio.fail_after(2): - async with tractor.open_nursery() as tn: - await tn.run_in_actor( - spawn, - name='spawn', + # + # XXX BIG TODO NOTE XXX + # + # it seems there's a strange race that can happen + # where where the fail-after will trigger outer scope + # .cancel() which then causes the inner scope to raise, + # + # BaseExceptionGroup('Exceptions from Trio nursery', [ + # BaseExceptionGroup('Exceptions from Trio nursery', + # [ + # Cancelled(), + # Cancelled(), + # ] + # ), + # AssertionError('assert 0') + # ]) + # + # WHY THIS DOESN'T MAKE SENSE: + # --------------------------- + # - it should raise too-slow-error when too slow.. + # * verified that using simple-cs and manually cancelling + # you get same outcome -> indicates that the fail-after + # can have its TooSlowError overriden! + # |_ to check this it's easy, simplly decrease the timeout + # as per the var below. + # + # - when using the manual simple-cs the outcome is different + # DESPITE the `assert 0` which means regardless of the + # inner scope effectively failing in the same way, the + # bubbling up **is NOT the same**. + # + # delays trigger diff outcomes.. + # --------------------------- + # as seen by uncommenting various lines below there is from + # my POV an unexpected outcome due to the delay=2 case. + # + # delay = 1 # no AssertionError in eg, TooSlowError raised. + # delay = 2 # is AssertionError in eg AND no TooSlowError !? + delay = 4 # is AssertionError in eg AND no _cs cancellation. + + with trio.fail_after(delay) as _cs: + # with trio.CancelScope() as cs: + # ^XXX^ can be used instead to see same outcome. + + async with ( + # tractor.trionics.collapse_eg(), # doesn't help + tractor.open_nursery( + hide_tb=False, + debug_mode=debug_mode, + registry_addrs=[reg_addr], + ) as an, + ): + await an.run_in_actor( + spawn_sub_with_sync_blocking_task, + name='sync_blocking_sub', ) await trio.sleep(1) + + if man_cancel_outer: + print('Cancelling manually in root') + _cs.cancel() + + # trigger exc-srced taskc down + # the actor tree. 
+ print('RAISING IN ROOT') assert 0 with pytest.raises(AssertionError): diff --git a/tests/test_child_manages_service_nursery.py b/tests/test_child_manages_service_nursery.py index 540e9b2e..6379afc6 100644 --- a/tests/test_child_manages_service_nursery.py +++ b/tests/test_child_manages_service_nursery.py @@ -117,9 +117,10 @@ async def open_actor_local_nursery( ctx: tractor.Context, ): global _nursery - async with trio.open_nursery( - strict_exception_groups=False, - ) as tn: + async with ( + tractor.trionics.collapse_eg(), + trio.open_nursery() as tn + ): _nursery = tn await ctx.started() await trio.sleep(10) diff --git a/tests/test_clustering.py b/tests/test_clustering.py index 92362b58..603b2eb4 100644 --- a/tests/test_clustering.py +++ b/tests/test_clustering.py @@ -13,26 +13,24 @@ MESSAGE = 'tractoring at full speed' def test_empty_mngrs_input_raises() -> None: async def main(): - with trio.fail_after(1): + with trio.fail_after(3): async with ( open_actor_cluster( modules=[__name__], # NOTE: ensure we can passthrough runtime opts - loglevel='info', - # debug_mode=True, + loglevel='cancel', + debug_mode=False, ) as portals, - gather_contexts( - # NOTE: it's the use of inline-generator syntax - # here that causes the empty input. - mngrs=( - p.open_context(worker) for p in portals.values() - ), - ), + gather_contexts(mngrs=()), ): - assert 0 + # should fail before this? + assert portals + + # test should fail if we mk it here! + assert 0, 'Should have raised val-err !?' with pytest.raises(ValueError): trio.run(main) diff --git a/tests/test_context_stream_semantics.py b/tests/test_context_stream_semantics.py index 14cb9cc6..4c347e91 100644 --- a/tests/test_context_stream_semantics.py +++ b/tests/test_context_stream_semantics.py @@ -252,7 +252,7 @@ def test_simple_context( pass except BaseExceptionGroup as beg: # XXX: on windows it seems we may have to expect the group error - from tractor._exceptions import is_multi_cancelled + from tractor.trionics import is_multi_cancelled assert is_multi_cancelled(beg) else: trio.run(main) diff --git a/tests/test_discovery.py b/tests/test_discovery.py index 87455983..453b1aa3 100644 --- a/tests/test_discovery.py +++ b/tests/test_discovery.py @@ -7,8 +7,11 @@ import platform from functools import partial import itertools +import psutil import pytest +import subprocess import tractor +from tractor.trionics import collapse_eg from tractor._testing import tractor_test import trio @@ -26,7 +29,7 @@ async def test_reg_then_unreg(reg_addr): portal = await n.start_actor('actor', enable_modules=[__name__]) uid = portal.channel.uid - async with tractor.get_registry(*reg_addr) as aportal: + async with tractor.get_registry(reg_addr) as aportal: # this local actor should be the arbiter assert actor is aportal.actor @@ -152,15 +155,25 @@ async def unpack_reg(actor_or_portal): async def spawn_and_check_registry( reg_addr: tuple, use_signal: bool, + debug_mode: bool = False, remote_arbiter: bool = False, with_streaming: bool = False, + maybe_daemon: tuple[ + subprocess.Popen, + psutil.Process, + ]|None = None, ) -> None: + if maybe_daemon: + popen, proc = maybe_daemon + # breakpoint() + async with tractor.open_root_actor( registry_addrs=[reg_addr], + debug_mode=debug_mode, ): - async with tractor.get_registry(*reg_addr) as portal: + async with tractor.get_registry(reg_addr) as portal: # runtime needs to be up to call this actor = tractor.current_actor() @@ -176,30 +189,30 @@ async def spawn_and_check_registry( extra = 2 # local root actor + remote arbiter # ensure 
current actor is registered - registry = await get_reg() + registry: dict = await get_reg() assert actor.uid in registry try: - async with tractor.open_nursery() as n: - async with trio.open_nursery( - strict_exception_groups=False, - ) as trion: - + async with tractor.open_nursery() as an: + async with ( + collapse_eg(), + trio.open_nursery() as trion, + ): portals = {} for i in range(3): name = f'a{i}' if with_streaming: - portals[name] = await n.start_actor( + portals[name] = await an.start_actor( name=name, enable_modules=[__name__]) else: # no streaming - portals[name] = await n.run_in_actor( + portals[name] = await an.run_in_actor( trio.sleep_forever, name=name) # wait on last actor to come up async with tractor.wait_for_actor(name): registry = await get_reg() - for uid in n._children: + for uid in an._children: assert uid in registry assert len(portals) + extra == len(registry) @@ -232,6 +245,7 @@ async def spawn_and_check_registry( @pytest.mark.parametrize('use_signal', [False, True]) @pytest.mark.parametrize('with_streaming', [False, True]) def test_subactors_unregister_on_cancel( + debug_mode: bool, start_method, use_signal, reg_addr, @@ -248,6 +262,7 @@ def test_subactors_unregister_on_cancel( spawn_and_check_registry, reg_addr, use_signal, + debug_mode=debug_mode, remote_arbiter=False, with_streaming=with_streaming, ), @@ -257,7 +272,8 @@ def test_subactors_unregister_on_cancel( @pytest.mark.parametrize('use_signal', [False, True]) @pytest.mark.parametrize('with_streaming', [False, True]) def test_subactors_unregister_on_cancel_remote_daemon( - daemon, + daemon: subprocess.Popen, + debug_mode: bool, start_method, use_signal, reg_addr, @@ -273,8 +289,13 @@ def test_subactors_unregister_on_cancel_remote_daemon( spawn_and_check_registry, reg_addr, use_signal, + debug_mode=debug_mode, remote_arbiter=True, with_streaming=with_streaming, + maybe_daemon=( + daemon, + psutil.Process(daemon.pid) + ), ), ) @@ -300,7 +321,7 @@ async def close_chans_before_nursery( async with tractor.open_root_actor( registry_addrs=[reg_addr], ): - async with tractor.get_registry(*reg_addr) as aportal: + async with tractor.get_registry(reg_addr) as aportal: try: get_reg = partial(unpack_reg, aportal) @@ -318,11 +339,12 @@ async def close_chans_before_nursery( async with portal2.open_stream_from( stream_forever ) as agen2: - async with trio.open_nursery( - strict_exception_groups=False, - ) as n: - n.start_soon(streamer, agen1) - n.start_soon(cancel, use_signal, .5) + async with ( + collapse_eg(), + trio.open_nursery() as tn, + ): + tn.start_soon(streamer, agen1) + tn.start_soon(cancel, use_signal, .5) try: await streamer(agen2) finally: @@ -373,7 +395,7 @@ def test_close_channel_explicit( @pytest.mark.parametrize('use_signal', [False, True]) def test_close_channel_explicit_remote_arbiter( - daemon, + daemon: subprocess.Popen, start_method, use_signal, reg_addr, diff --git a/tests/test_docs_examples.py b/tests/test_docs_examples.py index cc4904f8..6250e0aa 100644 --- a/tests/test_docs_examples.py +++ b/tests/test_docs_examples.py @@ -66,6 +66,9 @@ def run_example_in_subproc( # due to backpressure!!! 
proc = testdir.popen( cmdargs, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, **kwargs, ) assert not proc.returncode @@ -119,10 +122,14 @@ def test_example( code = ex.read() with run_example_in_subproc(code) as proc: - proc.wait() - err, _ = proc.stderr.read(), proc.stdout.read() - # print(f'STDERR: {err}') - # print(f'STDOUT: {out}') + err = None + try: + if not proc.poll(): + _, err = proc.communicate(timeout=15) + + except subprocess.TimeoutExpired as e: + proc.kill() + err = e.stderr # if we get some gnarly output let's aggregate and raise if err: diff --git a/tests/test_infected_asyncio.py b/tests/test_infected_asyncio.py index 465decca..f11a4eed 100644 --- a/tests/test_infected_asyncio.py +++ b/tests/test_infected_asyncio.py @@ -234,10 +234,8 @@ async def trio_ctx( with trio.fail_after(1 + delay): try: async with ( - trio.open_nursery( - # TODO, for new `trio` / py3.13 - # strict_exception_groups=False, - ) as tn, + tractor.trionics.collapse_eg(), + trio.open_nursery() as tn, tractor.to_asyncio.open_channel_from( sleep_and_err, ) as (first, chan), @@ -573,14 +571,16 @@ def test_basic_interloop_channel_stream( fan_out: bool, ): async def main(): - async with tractor.open_nursery() as an: - portal = await an.run_in_actor( - stream_from_aio, - infect_asyncio=True, - fan_out=fan_out, - ) - # should raise RAE diectly - await portal.result() + # TODO, figure out min timeout here! + with trio.fail_after(6): + async with tractor.open_nursery() as an: + portal = await an.run_in_actor( + stream_from_aio, + infect_asyncio=True, + fan_out=fan_out, + ) + # should raise RAE diectly + await portal.result() trio.run(main) @@ -889,7 +889,7 @@ async def manage_file( # NOTE: turns out you don't even need to sched an aio task # since the original issue, even though seemingly was due to - # the guest-run being abandoned + a `._debug.pause()` inside + # the guest-run being abandoned + a `.debug.pause()` inside # `._runtime._async_main()` (which was originally trying to # debug the `.lifetime_stack` not closing), IS NOT actually # the core issue? @@ -1088,6 +1088,108 @@ def test_sigint_closes_lifetime_stack( trio.run(main) + +# ?TODO asyncio.Task fn-deco? +# -[ ] do sig checkingat import time like @context? +# -[ ] maybe name it @aio_task ?? +# -[ ] chan: to_asyncio.InterloopChannel ?? +async def raise_before_started( + # from_trio: asyncio.Queue, + # to_trio: trio.abc.SendChannel, + chan: to_asyncio.LinkedTaskChannel, + +) -> None: + ''' + `asyncio.Task` entry point which RTEs before calling + `to_trio.send_nowait()`. + + ''' + await asyncio.sleep(0.2) + raise RuntimeError('Some shite went wrong before `.send_nowait()`!!') + + # to_trio.send_nowait('Uhh we shouldve RTE-d ^^ ??') + chan.started_nowait('Uhh we shouldve RTE-d ^^ ??') + await asyncio.sleep(float('inf')) + + +@tractor.context +async def caching_ep( + ctx: tractor.Context, +): + + log = tractor.log.get_logger('caching_ep') + log.info('syncing via `ctx.started()`') + await ctx.started() + + # XXX, allocate the `open_channel_from()` inside + # a `.trionics.maybe_open_context()`. 
+ chan: to_asyncio.LinkedTaskChannel + async with ( + tractor.trionics.maybe_open_context( + acm_func=tractor.to_asyncio.open_channel_from, + kwargs={ + 'target': raise_before_started, + # ^XXX, kwarg to `open_channel_from()` + }, + + # lock around current actor task access + key=tractor.current_actor().uid, + + ) as (cache_hit, (clients, chan)), + ): + if cache_hit: + log.error( + 'Re-using cached `.open_from_channel()` call!\n' + ) + + else: + log.info( + 'Allocating SHOULD-FAIL `.open_from_channel()`\n' + ) + + await trio.sleep_forever() + + +def test_aio_side_raises_before_started( + reg_addr: tuple[str, int], + debug_mode: bool, + loglevel: str, +): + ''' + Simulates connection-err from `piker.brokers.ib.api`.. + + Ensure any error raised by child-`asyncio.Task` BEFORE + `chan.started()` + + ''' + # delay = 999 if debug_mode else 1 + async def main(): + with trio.fail_after(3): + an: tractor.ActorNursery + async with tractor.open_nursery( + debug_mode=debug_mode, + loglevel=loglevel, + ) as an: + p: tractor.Portal = await an.start_actor( + 'lchan_cacher_that_raises_fast', + enable_modules=[__name__], + infect_asyncio=True, + ) + async with p.open_context( + caching_ep, + ) as (ctx, first): + assert not first + + with pytest.raises( + expected_exception=(RemoteActorError), + ) as excinfo: + trio.run(main) + + # ensure `asyncio.Task` exception is bubbled + # allll the way erp!! + rae = excinfo.value + assert rae.boxed_type is RuntimeError + # TODO: debug_mode tests once we get support for `asyncio`! # # -[ ] need tests to wrap both scripts: @@ -1101,7 +1203,7 @@ def test_sigint_closes_lifetime_stack( # => completed using `.bestow_portal(task)` inside # `.to_asyncio._run_asyncio_task()` right? # -[ ] translation func to get from `asyncio` task calling to -# `._debug.wait_for_parent_stdin_hijack()` which does root +# `.debug.wait_for_parent_stdin_hijack()` which does root # call to do TTY locking. # def test_sync_breakpoint(): diff --git a/tests/test_inter_peer_cancellation.py b/tests/test_inter_peer_cancellation.py index bac9a791..b6d469d9 100644 --- a/tests/test_inter_peer_cancellation.py +++ b/tests/test_inter_peer_cancellation.py @@ -410,7 +410,6 @@ def test_peer_canceller( ''' async def main(): async with tractor.open_nursery( - # NOTE: to halt the peer tasks on ctxc, uncomment this. debug_mode=debug_mode, ) as an: canceller: Portal = await an.start_actor( @@ -871,7 +870,7 @@ async def serve_subactors( ) await ipc.send(( peer.chan.uid, - peer.chan.raddr, + peer.chan.raddr.unwrap(), )) print('Spawner exiting spawn serve loop!') diff --git a/tests/test_legacy_one_way_streaming.py b/tests/test_legacy_one_way_streaming.py index 6092bca7..10cf3aed 100644 --- a/tests/test_legacy_one_way_streaming.py +++ b/tests/test_legacy_one_way_streaming.py @@ -235,10 +235,16 @@ async def cancel_after(wait, reg_addr): @pytest.fixture(scope='module') -def time_quad_ex(reg_addr, ci_env, spawn_backend): +def time_quad_ex( + reg_addr: tuple, + ci_env: bool, + spawn_backend: str, +): if spawn_backend == 'mp': - """no idea but the mp *nix runs are flaking out here often... - """ + ''' + no idea but the mp *nix runs are flaking out here often... 
+
+        '''
         pytest.skip("Test is too flaky on mp in CI")

     timeout = 7 if platform.system() in ('Windows', 'Darwin') else 4
@@ -249,12 +255,24 @@ def time_quad_ex(reg_addr, ci_env, spawn_backend):
     return results, diff


-def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend):
-    """This also serves as a kind of "we'd like to be this fast test"."""
+def test_a_quadruple_example(
+    time_quad_ex: tuple,
+    ci_env: bool,
+    spawn_backend: str,
+):
+    '''
+    This also serves as a kind of "we'd like to be this fast test".
+    '''
     results, diff = time_quad_ex
     assert results
-    this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 3
+    this_fast = (
+        6 if platform.system() in (
+            'Windows',
+            'Darwin',
+        )
+        else 3
+    )
     assert diff < this_fast

diff --git a/tests/test_local.py b/tests/test_local.py
index ecdad5fe..c6f5047a 100644
--- a/tests/test_local.py
+++ b/tests/test_local.py
@@ -38,7 +38,7 @@ async def test_self_is_registered_localportal(reg_addr):
     "Verify waiting on the arbiter to register itself using a local portal."
     actor = tractor.current_actor()
     assert actor.is_arbiter
-    async with tractor.get_registry(*reg_addr) as portal:
+    async with tractor.get_registry(reg_addr) as portal:
         assert isinstance(portal, tractor._portal.LocalPortal)

         with trio.fail_after(0.2):
diff --git a/tests/test_multi_program.py b/tests/test_multi_program.py
index 860eeebb..b0b145ee 100644
--- a/tests/test_multi_program.py
+++ b/tests/test_multi_program.py
@@ -32,7 +32,7 @@ def test_abort_on_sigint(daemon):
 @tractor_test
 async def test_cancel_remote_arbiter(daemon, reg_addr):
     assert not tractor.current_actor().is_arbiter
-    async with tractor.get_registry(*reg_addr) as portal:
+    async with tractor.get_registry(reg_addr) as portal:
         await portal.cancel_actor()

     time.sleep(0.1)
@@ -41,7 +41,7 @@ async def test_cancel_remote_arbiter(daemon, reg_addr):

     # no arbiter socket should exist
     with pytest.raises(OSError):
-        async with tractor.get_registry(*reg_addr) as portal:
+        async with tractor.get_registry(reg_addr) as portal:
             pass


diff --git a/tests/test_remote_exc_relay.py b/tests/test_remote_exc_relay.py
new file mode 100644
index 00000000..c2bb1ea3
--- /dev/null
+++ b/tests/test_remote_exc_relay.py
@@ -0,0 +1,237 @@
+'''
+Special case testing for issues not (dis)covered in the primary
+`Context` related functional/scenario suites.
+
+**NOTE: this mod is a WIP** space for handling
+odd/rare/undiscovered/not-yet-revealed faults which either
+loudly (ideal case) break our supervision protocol
+or (worst case) result in distributed sys hangs.
+
+Suites here further try to clarify (if [partially] ill-defined) and
+verify our edge case semantics for inter-actor-relayed-exceptions
+including,
+
+- lowlevel: what remote obj-data is interchanged for IPC and what
+  native-obj form is expected from unpacking in the new
+  mem-domain.
+
+- which kinds of `RemoteActorError` (and its derivs) are expected by which
+  (types of) peers (parent, child, sibling, etc) with what
+  particular meta-data set such as,
+
+  - `.src_uid`: the original (maybe) peer who raised.
+  - `.relay_uid`: the next-hop-peer who sent it.
+  - `.relay_path`: the sequence of peer actor hops.
+  - `.is_inception`: a predicate that denotes multi-hop remote errors.
+
+- when `ExceptionGroup`s should be relayed from a particular
+  remote endpoint; they should never be caused by implicit `._rpc`
+  nursery machinery!
+
+- various special `trio` edge cases around its cancellation semantics
+  and how we (currently) leverage `trio.Cancelled` as a signal for
+  whether a `Context` task should raise `ContextCancelled` (ctxc).
+
+'''
+import pytest
+import trio
+import tractor
+from tractor import (  # typing
+    ActorNursery,
+    Portal,
+    Context,
+    ContextCancelled,
+)
+
+
+@tractor.context
+async def sleep_n_chkpt_in_finally(
+    ctx: Context,
+    sleep_n_raise: bool,
+
+    chld_raise_delay: float,
+    chld_finally_delay: float,
+
+    rent_cancels: bool,
+    rent_ctxc_delay: float,
+
+    expect_exc: str|None = None,
+
+) -> None:
+    '''
+    Sync, open a tn, then wait for cancel, run a chkpt inside
+    the user's `finally:` teardown.
+
+    This covers a footgun case that `trio` core doesn't seem to care about
+    wherein an exc can be masked by a `trio.Cancelled` raised inside a tn-embedded
+    `finally:`.
+
+    Also see `test_trioisms::test_acm_embedded_nursery_propagates_enter_err`
+    for the down and gritty details.
+
+    Since a `@context` endpoint fn can also contain code like this,
+    **and** bc we currently have no easy way other than
+    `trio.Cancelled` to signal cancellation on each side of an IPC `Context`,
+    the footgun issue can compound itself as demonstrated in this suite..
+
+    Here are some edge cases codified with our WIP "sclang" syntax
+    (note the parent(rent)/child(chld) naming here is just
+    pragmatism; generally most of these cases can occur
+    regardless of the distributed-task's supervision hierarchy),
+
+    - rent c)=> chld.raises-then-taskc-in-finally
+     |_ chld's body raises an `exc: BaseException`.
+      _ in its `finally:` block it runs a chkpoint
+        which raises a taskc (`trio.Cancelled`) which
+        masks `exc`, instead raising taskc up to the first tn.
+      _ the embedded/chld tn captures the masking taskc and then
+        raises it up to the ._rpc-ep-tn instead of `exc`.
+      _ the rent thinks the child ctxc-ed instead of errored..
+
+    '''
+    await ctx.started()
+
+    if expect_exc:
+        expect_exc: BaseException = tractor._exceptions.get_err_type(
+            type_name=expect_exc,
+        )
+
+    berr: BaseException|None = None
+    try:
+        if not sleep_n_raise:
+            await trio.sleep_forever()
+        elif sleep_n_raise:
+
+            # XXX this sleep is less than the sleep the parent
+            # does before calling `ctx.cancel()`
+            await trio.sleep(chld_raise_delay)
+
+            # XXX this will be masked by a taskc raised in
+            # the `finally:` if this fn doesn't terminate
+            # before any ctxc-req arrives AND a checkpoint is hit
+            # in that `finally:`.
+            raise RuntimeError('my app krurshed..')
+
+    except BaseException as _berr:
+        berr = _berr
+
+        # TODO: it'd sure be nice to be able to inject our own
+        # `ContextCancelled` here instead of `trio.Cancelled`
+        # so that our runtime can expect it and this "user code"
+        # would be able to tell the diff between a generic trio
+        # cancel and a tractor runtime-IPC cancel.
+        if expect_exc:
+            if not isinstance(
+                berr,
+                expect_exc,
+            ):
+                raise ValueError(
+                    f'Unexpected exc type ??\n'
+                    f'{berr!r}\n'
+                    f'\n'
+                    f'Expected a {expect_exc!r}\n'
+                )
+
+        raise berr
+
+    # simulate what user code might try even though
+    # it's a known boo-boo..
+    finally:
+        # maybe wait for rent ctxc to arrive
+        with trio.CancelScope(shield=True):
+            await trio.sleep(chld_finally_delay)

+        # !!XXX this will raise `trio.Cancelled` which
+        # will mask the RTE from above!!!
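+        # A minimal sketch of the same footgun in plain `trio`
+        # (hypothetical code, not part of this suite): a checkpoint
+        # hit in a `finally:` inside an already-cancelled scope
+        # raises `Cancelled`, burying the in-flight exc as its
+        # `__context__`,
+        #
+        #   async def amain():
+        #       with trio.move_on_after(0.1):
+        #           try:
+        #               raise RuntimeError('boom')
+        #           finally:
+        #               # the 0.1s deadline expires during this
+        #               # sleep, so the chkpt raises `Cancelled`
+        #               # and the RTE is silently masked/swallowed.
+        #               await trio.sleep(1)
+        #
+        #   trio.run(amain)  # completes without error!
+        #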
+ # + # YES, it's the same case as our extant + # `test_trioisms::test_acm_embedded_nursery_propagates_enter_err` + try: + await trio.lowlevel.checkpoint() + except trio.Cancelled as taskc: + if (scope_err := taskc.__context__): + print( + f'XXX MASKED REMOTE ERROR XXX\n' + f'ENDPOINT exception -> {scope_err!r}\n' + f'will be masked by -> {taskc!r}\n' + ) + # await tractor.pause(shield=True) + + raise taskc + + +@pytest.mark.parametrize( + 'chld_callspec', + [ + dict( + sleep_n_raise=None, + chld_raise_delay=0.1, + chld_finally_delay=0.1, + expect_exc='Cancelled', + rent_cancels=True, + rent_ctxc_delay=0.1, + ), + dict( + sleep_n_raise='RuntimeError', + chld_raise_delay=0.1, + chld_finally_delay=1, + expect_exc='RuntimeError', + rent_cancels=False, + rent_ctxc_delay=0.1, + ), + ], + ids=lambda item: f'chld_callspec={item!r}' +) +def test_unmasked_remote_exc( + debug_mode: bool, + chld_callspec: dict, + tpt_proto: str, +): + expect_exc_str: str|None = chld_callspec['sleep_n_raise'] + rent_ctxc_delay: float|None = chld_callspec['rent_ctxc_delay'] + async def main(): + an: ActorNursery + async with tractor.open_nursery( + debug_mode=debug_mode, + enable_transports=[tpt_proto], + ) as an: + ptl: Portal = await an.start_actor( + 'cancellee', + enable_modules=[__name__], + ) + ctx: Context + async with ( + ptl.open_context( + sleep_n_chkpt_in_finally, + **chld_callspec, + ) as (ctx, sent), + ): + assert not sent + await trio.sleep(rent_ctxc_delay) + await ctx.cancel() + + # recv error or result from chld + ctxc: ContextCancelled = await ctx.wait_for_result() + assert ( + ctxc is ctx.outcome + and + isinstance(ctxc, ContextCancelled) + ) + + # always graceful terminate the sub in non-error cases + await an.cancel() + + if expect_exc_str: + expect_exc: BaseException = tractor._exceptions.get_err_type( + type_name=expect_exc_str, + ) + with pytest.raises( + expected_exception=tractor.RemoteActorError, + ) as excinfo: + trio.run(main) + + rae = excinfo.value + assert expect_exc == rae.boxed_type + + else: + trio.run(main) diff --git a/tests/test_resource_cache.py b/tests/test_resource_cache.py index d3859814..10eb3d84 100644 --- a/tests/test_resource_cache.py +++ b/tests/test_resource_cache.py @@ -1,5 +1,6 @@ ''' -Async context manager cache api testing: ``trionics.maybe_open_context():`` +Suites for our `.trionics.maybe_open_context()` multi-task +shared-cached `@acm` API. 
''' from contextlib import asynccontextmanager as acm @@ -9,6 +10,15 @@ from typing import Awaitable import pytest import trio import tractor +from tractor.trionics import ( + maybe_open_context, +) +from tractor.log import ( + get_console_log, + get_logger, +) +log = get_logger(__name__) + _resource: int = 0 @@ -52,7 +62,7 @@ def test_resource_only_entered_once(key_on): # different task names per task will be used kwargs = {'task_name': name} - async with tractor.trionics.maybe_open_context( + async with maybe_open_context( maybe_increment_counter, kwargs=kwargs, key=key, @@ -72,11 +82,13 @@ def test_resource_only_entered_once(key_on): with trio.move_on_after(0.5): async with ( tractor.open_root_actor(), - trio.open_nursery() as n, + trio.open_nursery() as tn, ): - for i in range(10): - n.start_soon(enter_cached_mngr, f'task_{i}') + tn.start_soon( + enter_cached_mngr, + f'task_{i}', + ) await trio.sleep(0.001) trio.run(main) @@ -98,27 +110,55 @@ async def streamer( @acm -async def open_stream() -> Awaitable[tractor.MsgStream]: +async def open_stream() -> Awaitable[ + tuple[ + tractor.ActorNursery, + tractor.MsgStream, + ] +]: + try: + async with tractor.open_nursery() as an: + portal = await an.start_actor( + 'streamer', + enable_modules=[__name__], + ) + try: + async with ( + portal.open_context(streamer) as (ctx, first), + ctx.open_stream() as stream, + ): + print('Entered open_stream() caller') + yield an, stream + print('Exited open_stream() caller') - async with tractor.open_nursery() as tn: - portal = await tn.start_actor('streamer', enable_modules=[__name__]) - async with ( - portal.open_context(streamer) as (ctx, first), - ctx.open_stream() as stream, - ): - yield stream + finally: + print( + 'Cancelling streamer with,\n' + '=> `Portal.cancel_actor()`' + ) + await portal.cancel_actor() + print('Cancelled streamer') - await portal.cancel_actor() - print('CANCELLED STREAMER') + except Exception as err: + print( + f'`open_stream()` errored?\n' + f'{err!r}\n' + ) + await tractor.pause(shield=True) + raise err @acm async def maybe_open_stream(taskname: str): - async with tractor.trionics.maybe_open_context( + async with maybe_open_context( # NOTE: all secondary tasks should cache hit on the same key acm_func=open_stream, - ) as (cache_hit, stream): - + ) as ( + cache_hit, + (an, stream) + ): + # when the actor + portal + ctx + stream has already been + # allocated we want to just bcast to this task. if cache_hit: print(f'{taskname} loaded from cache') @@ -126,27 +166,77 @@ async def maybe_open_stream(taskname: str): # if this feed is already allocated by the first # task that entereed async with stream.subscribe() as bstream: - yield bstream + yield an, bstream + print( + f'cached task exited\n' + f')>\n' + f' |_{taskname}\n' + ) + + # we should always unreg the "cloned" bcrc for this + # consumer-task + assert id(bstream) not in bstream._state.subs + else: # yield the actual stream - yield stream + try: + yield an, stream + finally: + print( + f'NON-cached task exited\n' + f')>\n' + f' |_{taskname}\n' + ) + + first_bstream = stream._broadcaster + bcrx_state = first_bstream._state + subs: dict[int, int] = bcrx_state.subs + if len(subs) == 1: + assert id(first_bstream) in subs + # ^^TODO! the bcrx should always de-allocate all subs, + # including the implicit first one allocated on entry + # by the first subscribing peer task, no? + # + # -[ ] adjust `MsgStream.subscribe()` to do this mgmt! + # |_ allows reverting `MsgStream.receive()` to the + # non-bcaster method. 
+ # |_ we can decide whether to reset `._broadcaster`? + # + # await tractor.pause(shield=True) -def test_open_local_sub_to_stream(): +def test_open_local_sub_to_stream( + debug_mode: bool, +): ''' Verify a single inter-actor stream can can be fanned-out shared to - N local tasks using ``trionics.maybe_open_context():``. + N local tasks using `trionics.maybe_open_context()`. ''' - timeout: float = 3.6 if platform.system() != "Windows" else 10 + timeout: float = 3.6 + if platform.system() == "Windows": + timeout: float = 10 + + if debug_mode: + timeout = 999 + print(f'IN debug_mode, setting large timeout={timeout!r}..') async def main(): full = list(range(1000)) + an: tractor.ActorNursery|None = None + num_tasks: int = 10 async def get_sub_and_pull(taskname: str): + + nonlocal an + + stream: tractor.MsgStream async with ( - maybe_open_stream(taskname) as stream, + maybe_open_stream(taskname) as ( + an, + stream, + ), ): if '0' in taskname: assert isinstance(stream, tractor.MsgStream) @@ -158,24 +248,159 @@ def test_open_local_sub_to_stream(): first = await stream.receive() print(f'{taskname} started with value {first}') - seq = [] + seq: list[int] = [] async for msg in stream: seq.append(msg) assert set(seq).issubset(set(full)) + + # end of @acm block print(f'{taskname} finished') - with trio.fail_after(timeout): + root: tractor.Actor + with trio.fail_after(timeout) as cs: # TODO: turns out this isn't multi-task entrant XD # We probably need an indepotent entry semantic? - async with tractor.open_root_actor(): + async with tractor.open_root_actor( + debug_mode=debug_mode, + # maybe_enable_greenback=True, + # + # ^TODO? doesn't seem to mk breakpoint() usage work + # bc each bg task needs to open a portal?? + # - [ ] we should consider making this part of + # our taskman defaults? + # |_see https://github.com/goodboy/tractor/pull/363 + # + ) as root: + assert root.is_registrar + async with ( - trio.open_nursery() as nurse, + trio.open_nursery() as tn, ): - for i in range(10): - nurse.start_soon(get_sub_and_pull, f'task_{i}') + for i in range(num_tasks): + tn.start_soon( + get_sub_and_pull, + f'task_{i}', + ) await trio.sleep(0.001) - print('all consumer tasks finished') + print('all consumer tasks finished!') + + # ?XXX, ensure actor-nursery is shutdown or we might + # hang here due to a minor task deadlock/race-condition? + # + # - seems that all we need is a checkpoint to ensure + # the last suspended task, which is inside + # `.maybe_open_context()`, can do the + # `Portal.cancel_actor()` call? + # + # - if that bg task isn't resumed, then this blocks + # timeout might hit before that? + # + if root.ipc_server.has_peers(): + await trio.lowlevel.checkpoint() + + # alt approach, cancel the entire `an` + # await tractor.pause() + # await an.cancel() + + # end of runtime scope + print('root actor terminated.') + + if cs.cancelled_caught: + pytest.fail( + 'Should NOT time out in `open_root_actor()` ?' + ) + + print('exiting main.') + + trio.run(main) + + + +@acm +async def cancel_outer_cs( + cs: trio.CancelScope|None = None, + delay: float = 0, +): + # on first task delay this enough to block + # the 2nd task but then cancel it mid sleep + # so that the tn.start() inside the key-err handler block + # is cancelled and would previously corrupt the + # mutext state. 
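+    # (the guarantee under test: the mutex embedded in
+    #  `maybe_open_context()` must always be released, even when the
+    #  first/caching task is cancelled mid-cache-miss, such that the
+    #  2nd (cache-hitting) task can never hang on `lock.acquire()`.)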
+ log.info(f'task entering sleep({delay})') + await trio.sleep(delay) + if cs: + log.info('task calling cs.cancel()') + cs.cancel() + trio.lowlevel.checkpoint() + yield + await trio.sleep_forever() + + +def test_lock_not_corrupted_on_fast_cancel( + debug_mode: bool, + loglevel: str, +): + ''' + Verify that if the caching-task (the first to enter + `maybe_open_context()`) is cancelled mid-cache-miss, the embedded + mutex can never be left in a corrupted state. + + That is, the lock is always eventually released ensuring a peer + (cache-hitting) task will never, + + - be left to inf-block/hang on the `lock.acquire()`. + - try to release the lock when still owned by the caching-task + due to it having erronously exited without calling + `lock.release()`. + + + ''' + delay: float = 1. + + async def use_moc( + cs: trio.CancelScope|None, + delay: float, + ): + log.info('task entering moc') + async with maybe_open_context( + cancel_outer_cs, + kwargs={ + 'cs': cs, + 'delay': delay, + }, + ) as (cache_hit, _null): + if cache_hit: + log.info('2nd task entered') + else: + log.info('1st task entered') + + await trio.sleep_forever() + + async def main(): + with trio.fail_after(delay + 2): + async with ( + tractor.open_root_actor( + debug_mode=debug_mode, + loglevel=loglevel, + ), + trio.open_nursery() as tn, + ): + get_console_log('info') + log.info('yo starting') + cs = tn.cancel_scope + tn.start_soon( + use_moc, + cs, + delay, + name='child', + ) + with trio.CancelScope() as rent_cs: + await use_moc( + cs=rent_cs, + delay=delay, + ) + trio.run(main) diff --git a/tests/test_ringbuf.py b/tests/test_ringbuf.py new file mode 100644 index 00000000..0d3b420b --- /dev/null +++ b/tests/test_ringbuf.py @@ -0,0 +1,211 @@ +import time + +import trio +import pytest + +import tractor +from tractor.ipc._ringbuf import ( + open_ringbuf, + RBToken, + RingBuffSender, + RingBuffReceiver +) +from tractor._testing.samples import ( + generate_sample_messages, +) + +# in case you don't want to melt your cores, uncomment dis! 
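+# NOTE, a module-level `pytestmark = pytest.mark.skip` skips every
+# test in this module; remove (or comment) the line below to actually
+# run the (core-melting) ring-buf suites.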
+pytestmark = pytest.mark.skip + + +@tractor.context +async def child_read_shm( + ctx: tractor.Context, + msg_amount: int, + token: RBToken, + total_bytes: int, +) -> None: + recvd_bytes = 0 + await ctx.started() + start_ts = time.time() + async with RingBuffReceiver(token) as receiver: + while recvd_bytes < total_bytes: + msg = await receiver.receive_some() + recvd_bytes += len(msg) + + # make sure we dont hold any memoryviews + # before the ctx manager aclose() + msg = None + + end_ts = time.time() + elapsed = end_ts - start_ts + elapsed_ms = int(elapsed * 1000) + + print(f'\n\telapsed ms: {elapsed_ms}') + print(f'\tmsg/sec: {int(msg_amount / elapsed):,}') + print(f'\tbytes/sec: {int(recvd_bytes / elapsed):,}') + + +@tractor.context +async def child_write_shm( + ctx: tractor.Context, + msg_amount: int, + rand_min: int, + rand_max: int, + token: RBToken, +) -> None: + msgs, total_bytes = generate_sample_messages( + msg_amount, + rand_min=rand_min, + rand_max=rand_max, + ) + await ctx.started(total_bytes) + async with RingBuffSender(token) as sender: + for msg in msgs: + await sender.send_all(msg) + + +@pytest.mark.parametrize( + 'msg_amount,rand_min,rand_max,buf_size', + [ + # simple case, fixed payloads, large buffer + (100_000, 0, 0, 10 * 1024), + + # guaranteed wrap around on every write + (100, 10 * 1024, 20 * 1024, 10 * 1024), + + # large payload size, but large buffer + (10_000, 256 * 1024, 512 * 1024, 10 * 1024 * 1024) + ], + ids=[ + 'fixed_payloads_large_buffer', + 'wrap_around_every_write', + 'large_payloads_large_buffer', + ] +) +def test_ringbuf( + msg_amount: int, + rand_min: int, + rand_max: int, + buf_size: int +): + async def main(): + with open_ringbuf( + 'test_ringbuf', + buf_size=buf_size + ) as token: + proc_kwargs = { + 'pass_fds': (token.write_eventfd, token.wrap_eventfd) + } + + common_kwargs = { + 'msg_amount': msg_amount, + 'token': token, + } + async with tractor.open_nursery() as an: + send_p = await an.start_actor( + 'ring_sender', + enable_modules=[__name__], + proc_kwargs=proc_kwargs + ) + recv_p = await an.start_actor( + 'ring_receiver', + enable_modules=[__name__], + proc_kwargs=proc_kwargs + ) + async with ( + send_p.open_context( + child_write_shm, + rand_min=rand_min, + rand_max=rand_max, + **common_kwargs + ) as (sctx, total_bytes), + recv_p.open_context( + child_read_shm, + **common_kwargs, + total_bytes=total_bytes, + ) as (sctx, _sent), + ): + await recv_p.result() + + await send_p.cancel_actor() + await recv_p.cancel_actor() + + + trio.run(main) + + +@tractor.context +async def child_blocked_receiver( + ctx: tractor.Context, + token: RBToken +): + async with RingBuffReceiver(token) as receiver: + await ctx.started() + await receiver.receive_some() + + +def test_ring_reader_cancel(): + async def main(): + with open_ringbuf('test_ring_cancel_reader') as token: + async with ( + tractor.open_nursery() as an, + RingBuffSender(token) as _sender, + ): + recv_p = await an.start_actor( + 'ring_blocked_receiver', + enable_modules=[__name__], + proc_kwargs={ + 'pass_fds': (token.write_eventfd, token.wrap_eventfd) + } + ) + async with ( + recv_p.open_context( + child_blocked_receiver, + token=token + ) as (sctx, _sent), + ): + await trio.sleep(1) + await an.cancel() + + + with pytest.raises(tractor._exceptions.ContextCancelled): + trio.run(main) + + +@tractor.context +async def child_blocked_sender( + ctx: tractor.Context, + token: RBToken +): + async with RingBuffSender(token) as sender: + await ctx.started() + await sender.send_all(b'this will wrap') + + +def 
test_ring_sender_cancel(): + async def main(): + with open_ringbuf( + 'test_ring_cancel_sender', + buf_size=1 + ) as token: + async with tractor.open_nursery() as an: + recv_p = await an.start_actor( + 'ring_blocked_sender', + enable_modules=[__name__], + proc_kwargs={ + 'pass_fds': (token.write_eventfd, token.wrap_eventfd) + } + ) + async with ( + recv_p.open_context( + child_blocked_sender, + token=token + ) as (sctx, _sent), + ): + await trio.sleep(1) + await an.cancel() + + + with pytest.raises(tractor._exceptions.ContextCancelled): + trio.run(main) diff --git a/tests/test_root_infect_asyncio.py b/tests/test_root_infect_asyncio.py index 93deba13..78f9b2b4 100644 --- a/tests/test_root_infect_asyncio.py +++ b/tests/test_root_infect_asyncio.py @@ -147,8 +147,7 @@ def test_trio_prestarted_task_bubbles( await trio.sleep_forever() async def _trio_main(): - # with trio.fail_after(2): - with trio.fail_after(999): + with trio.fail_after(2 if not debug_mode else 999): first: str chan: to_asyncio.LinkedTaskChannel aio_ev = asyncio.Event() @@ -217,32 +216,25 @@ def test_trio_prestarted_task_bubbles( ): aio_ev.set() - with pytest.raises( - expected_exception=ExceptionGroup, - ) as excinfo: - tractor.to_asyncio.run_as_asyncio_guest( - trio_main=_trio_main, - ) - - eg = excinfo.value - rte_eg, rest_eg = eg.split(RuntimeError) - # ensure the trio-task's error bubbled despite the aio-side # having (maybe) errored first. if aio_err_trigger in ( 'after_trio_task_starts', 'after_start_point', ): - assert len(errs := rest_eg.exceptions) == 1 - typerr = errs[0] - assert ( - type(typerr) is TypeError - and - 'trio-side' in typerr.args - ) + patt: str = 'trio-side' + expect_exc = TypeError # when aio errors BEFORE (last) trio task is scheduled, we should # never see anythinb but the aio-side. else: - assert len(rtes := rte_eg.exceptions) == 1 - assert 'asyncio-side' in rtes[0].args[0] + patt: str = 'asyncio-side' + expect_exc = RuntimeError + + with pytest.raises(expect_exc) as excinfo: + tractor.to_asyncio.run_as_asyncio_guest( + trio_main=_trio_main, + ) + + caught_exc = excinfo.value + assert patt in caught_exc.args diff --git a/tests/test_root_runtime.py b/tests/test_root_runtime.py new file mode 100644 index 00000000..6fc39b7d --- /dev/null +++ b/tests/test_root_runtime.py @@ -0,0 +1,108 @@ +''' +Runtime boot/init sanity. + +''' + +import pytest +import trio + +import tractor +from tractor._exceptions import RuntimeFailure + + +@tractor.context +async def open_new_root_in_sub( + ctx: tractor.Context, +) -> None: + + async with tractor.open_root_actor(): + pass + + +@pytest.mark.parametrize( + 'open_root_in', + ['root', 'sub'], + ids='open_2nd_root_in={}'.format, +) +def test_only_one_root_actor( + open_root_in: str, + reg_addr: tuple, + debug_mode: bool +): + ''' + Verify we specially fail whenever more then one root actor + is attempted to be opened within an already opened tree. 
+ + ''' + async def main(): + async with tractor.open_nursery() as an: + + if open_root_in == 'root': + async with tractor.open_root_actor( + registry_addrs=[reg_addr], + ): + pass + + ptl: tractor.Portal = await an.start_actor( + name='bad_rooty_boi', + enable_modules=[__name__], + ) + + async with ptl.open_context( + open_new_root_in_sub, + ) as (ctx, first): + pass + + if open_root_in == 'root': + with pytest.raises( + RuntimeFailure + ) as excinfo: + trio.run(main) + + else: + with pytest.raises( + tractor.RemoteActorError, + ) as excinfo: + trio.run(main) + + assert excinfo.value.boxed_type is RuntimeFailure + + +def test_implicit_root_via_first_nursery( + reg_addr: tuple, + debug_mode: bool +): + ''' + The first `ActorNursery` open should implicitly call + `_root.open_root_actor()`. + + ''' + async def main(): + async with tractor.open_nursery() as an: + assert an._implicit_runtime_started + assert tractor.current_actor().aid.name == 'root' + + trio.run(main) + + +def test_runtime_vars_unset( + reg_addr: tuple, + debug_mode: bool +): + ''' + Ensure any `._state._runtime_vars` are restored to default values + after the root actor-runtime exits! + + ''' + assert not tractor._state._runtime_vars['_debug_mode'] + async def main(): + assert not tractor._state._runtime_vars['_debug_mode'] + async with tractor.open_nursery( + debug_mode=True, + ): + assert tractor._state._runtime_vars['_debug_mode'] + + # after runtime closure, should be reverted! + assert not tractor._state._runtime_vars['_debug_mode'] + + trio.run(main) diff --git a/tests/test_shm.py b/tests/test_shm.py new file mode 100644 index 00000000..ddeb67aa --- /dev/null +++ b/tests/test_shm.py @@ -0,0 +1,167 @@ +""" +Shared mem primitives and APIs. + +""" +import uuid + +# import numpy +import pytest +import trio +import tractor +from tractor.ipc._shm import ( + open_shm_list, + attach_shm_list, +) + + +@tractor.context +async def child_attach_shml_alot( + ctx: tractor.Context, + shm_key: str, +) -> None: + + await ctx.started(shm_key) + + # now try to attach a boatload of times in a loop.. 
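+    # (each iteration re-attaches a fresh handle onto the same
+    #  underlying shared-mem segment named by `shm_key`.)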
+ for _ in range(1000): + shml = attach_shm_list( + key=shm_key, + readonly=False, + ) + assert shml.shm.name == shm_key + await trio.sleep(0.001) + + +def test_child_attaches_alot(): + async def main(): + async with tractor.open_nursery() as an: + + # allocate writeable list in parent + key = f'shml_{uuid.uuid4()}' + shml = open_shm_list( + key=key, + ) + + portal = await an.start_actor( + 'shm_attacher', + enable_modules=[__name__], + ) + + async with ( + portal.open_context( + child_attach_shml_alot, + shm_key=shml.key, + ) as (ctx, start_val), + ): + assert start_val == key + await ctx.result() + + await portal.cancel_actor() + + trio.run(main) + + +@tractor.context +async def child_read_shm_list( + ctx: tractor.Context, + shm_key: str, + use_str: bool, + frame_size: int, +) -> None: + + # attach in child + shml = attach_shm_list( + key=shm_key, + # dtype=str if use_str else float, + ) + await ctx.started(shml.key) + + async with ctx.open_stream() as stream: + async for i in stream: + print(f'(child): reading shm list index: {i}') + + if use_str: + expect = str(float(i)) + else: + expect = float(i) + + if frame_size == 1: + val = shml[i] + assert expect == val + print(f'(child): reading value: {val}') + else: + frame = shml[i - frame_size:i] + print(f'(child): reading frame: {frame}') + + +@pytest.mark.parametrize( + 'use_str', + [False, True], + ids=lambda i: f'use_str_values={i}', +) +@pytest.mark.parametrize( + 'frame_size', + [1, 2**6, 2**10], + ids=lambda i: f'frame_size={i}', +) +def test_parent_writer_child_reader( + use_str: bool, + frame_size: int, +): + + async def main(): + async with tractor.open_nursery( + # debug_mode=True, + ) as an: + + portal = await an.start_actor( + 'shm_reader', + enable_modules=[__name__], + debug_mode=True, + ) + + # allocate writeable list in parent + key = 'shm_list' + seq_size = int(2 * 2 ** 10) + shml = open_shm_list( + key=key, + size=seq_size, + dtype=str if use_str else float, + readonly=False, + ) + + async with ( + portal.open_context( + child_read_shm_list, + shm_key=key, + use_str=use_str, + frame_size=frame_size, + ) as (ctx, sent), + + ctx.open_stream() as stream, + ): + + assert sent == key + + for i in range(seq_size): + + val = float(i) + if use_str: + val = str(val) + + # print(f'(parent): writing {val}') + shml[i] = val + + # only on frame fills do we + # signal to the child that a frame's + # worth is ready. 
+ if (i % frame_size) == 0: + print(f'(parent): signalling frame full on {val}') + await stream.send(i) + else: + print(f'(parent): signalling final frame on {val}') + await stream.send(i) + + await portal.cancel_actor() + + trio.run(main) diff --git a/tests/test_spawning.py b/tests/test_spawning.py index 99ec9abc..30e084d5 100644 --- a/tests/test_spawning.py +++ b/tests/test_spawning.py @@ -2,6 +2,7 @@ Spawning basics """ +from functools import partial from typing import ( Any, ) @@ -12,74 +13,99 @@ import tractor from tractor._testing import tractor_test -data_to_pass_down = {'doggy': 10, 'kitty': 4} +data_to_pass_down = { + 'doggy': 10, + 'kitty': 4, +} async def spawn( - is_arbiter: bool, + should_be_root: bool, data: dict, reg_addr: tuple[str, int], + + debug_mode: bool = False, ): - namespaces = [__name__] - await trio.sleep(0.1) + actor = tractor.current_actor(err_on_no_runtime=False) - async with tractor.open_root_actor( - arbiter_addr=reg_addr, - ): - actor = tractor.current_actor() - assert actor.is_arbiter == is_arbiter - data = data_to_pass_down + if should_be_root: + assert actor is None # no runtime yet + async with ( + tractor.open_root_actor( + arbiter_addr=reg_addr, + ), + tractor.open_nursery() as an, + ): + # now runtime exists + actor: tractor.Actor = tractor.current_actor() + assert actor.is_arbiter == should_be_root - if actor.is_arbiter: - async with tractor.open_nursery() as nursery: + # spawns subproc here + portal: tractor.Portal = await an.run_in_actor( + fn=spawn, - # forks here - portal = await nursery.run_in_actor( - spawn, - is_arbiter=False, - name='sub-actor', - data=data, - reg_addr=reg_addr, - enable_modules=namespaces, - ) + # spawning args + name='sub-actor', + enable_modules=[__name__], - assert len(nursery._children) == 1 - assert portal.channel.uid in tractor.current_actor()._peers - # be sure we can still get the result - result = await portal.result() - assert result == 10 - return result - else: - return 10 + # passed to a subactor-recursive RPC invoke + # of this same `spawn()` fn. + should_be_root=False, + data=data_to_pass_down, + reg_addr=reg_addr, + ) + + assert len(an._children) == 1 + assert ( + portal.channel.uid + in + tractor.current_actor().ipc_server._peers + ) + + # get result from child subactor + result = await portal.result() + assert result == 10 + return result + else: + assert actor.is_arbiter == should_be_root + return 10 -def test_local_arbiter_subactor_global_state( - reg_addr, +def test_run_in_actor_same_func_in_child( + reg_addr: tuple, + debug_mode: bool, ): result = trio.run( - spawn, - True, - data_to_pass_down, - reg_addr, + partial( + spawn, + should_be_root=True, + data=data_to_pass_down, + reg_addr=reg_addr, + debug_mode=debug_mode, + ) ) assert result == 10 async def movie_theatre_question(): - """A question asked in a dark theatre, in a tangent + ''' + A question asked in a dark theatre, in a tangent (errr, I mean different) process. - """ + + ''' return 'have you ever seen a portal?' @tractor_test async def test_movie_theatre_convo(start_method): - """The main ``tractor`` routine. - """ - async with tractor.open_nursery() as n: + ''' + The main ``tractor`` routine. 
- portal = await n.start_actor( + ''' + async with tractor.open_nursery(debug_mode=True) as an: + + portal = await an.start_actor( 'frank', # enable the actor to run funcs from this current module enable_modules=[__name__], @@ -118,8 +144,8 @@ async def test_most_beautiful_word( with trio.fail_after(1): async with tractor.open_nursery( debug_mode=debug_mode, - ) as n: - portal = await n.run_in_actor( + ) as an: + portal = await an.run_in_actor( cellar_door, return_value=return_value, name='some_linguist', diff --git a/tests/test_trioisms.py b/tests/test_trioisms.py index 9f1ccec9..ca1e6d55 100644 --- a/tests/test_trioisms.py +++ b/tests/test_trioisms.py @@ -8,6 +8,7 @@ from contextlib import ( ) import pytest +from tractor.trionics import collapse_eg import trio from trio import TaskStatus @@ -64,9 +65,8 @@ def test_stashed_child_nursery(use_start_soon): async def main(): async with ( - trio.open_nursery( - strict_exception_groups=False, - ) as pn, + collapse_eg(), + trio.open_nursery() as pn, ): cn = await pn.start(mk_child_nursery) assert cn @@ -112,55 +112,11 @@ def test_acm_embedded_nursery_propagates_enter_err( ''' import tractor - @acm - async def maybe_raise_from_masking_exc( - tn: trio.Nursery, - unmask_from: BaseException|None = trio.Cancelled - - # TODO, maybe offer a collection? - # unmask_from: set[BaseException] = { - # trio.Cancelled, - # }, - ): - if not unmask_from: - yield - return - - try: - yield - except* unmask_from as be_eg: - - # TODO, if we offer `unmask_from: set` - # for masker_exc_type in unmask_from: - - matches, rest = be_eg.split(unmask_from) - if not matches: - raise - - for exc_match in be_eg.exceptions: - if ( - (exc_ctx := exc_match.__context__) - and - type(exc_ctx) not in { - # trio.Cancelled, # always by default? - unmask_from, - } - ): - exc_ctx.add_note( - f'\n' - f'WARNING: the above error was masked by a {unmask_from!r} !?!\n' - f'Are you always cancelling? Say from a `finally:` ?\n\n' - - f'{tn!r}' - ) - raise exc_ctx from exc_match - - @acm async def wraps_tn_that_always_cancels(): async with ( trio.open_nursery() as tn, - maybe_raise_from_masking_exc( + tractor.trionics.maybe_raise_from_masking_exc( tn=tn, unmask_from=( trio.Cancelled @@ -180,7 +136,8 @@ def test_acm_embedded_nursery_propagates_enter_err( with tractor.devx.maybe_open_crash_handler( pdb=debug_mode, ) as bxerr: - assert not bxerr.value + if bxerr: + assert not bxerr.value async with ( wraps_tn_that_always_cancels() as tn, @@ -201,3 +158,58 @@ def test_acm_embedded_nursery_propagates_enter_err( assert_eg, rest_eg = eg.split(AssertionError) assert len(assert_eg.exceptions) == 1 + + + +def test_gatherctxs_with_memchan_breaks_multicancelled( + debug_mode: bool, +): + ''' + Demo how a using an `async with sndchan` inside a `.trionics.gather_contexts()` task + will break a strict-eg-tn's multi-cancelled absorption.. 
+ + ''' + from tractor import ( + trionics, + ) + + @acm + async def open_memchan() -> trio.abc.ReceiveChannel: + + task: trio.Task = trio.lowlevel.current_task() + print( + f'Opening {task!r}\n' + ) + + # 1 to force eager sending + send, recv = trio.open_memory_channel(16) + + try: + async with send: + yield recv + finally: + print( + f'Closed {task!r}\n' + ) + + + async def main(): + async with ( + # XXX should ensure ONLY the KBI + # is relayed upward + collapse_eg(), + trio.open_nursery(), # as tn, + + trionics.gather_contexts([ + open_memchan(), + open_memchan(), + ]) as recv_chans, + ): + assert len(recv_chans) == 2 + + await trio.sleep(1) + raise KeyboardInterrupt + # tn.cancel_scope.cancel() + + with pytest.raises(KeyboardInterrupt): + trio.run(main) diff --git a/tractor/__init__.py b/tractor/__init__.py index 0c011a22..6fac747f 100644 --- a/tractor/__init__.py +++ b/tractor/__init__.py @@ -64,7 +64,7 @@ from ._root import ( run_daemon as run_daemon, open_root_actor as open_root_actor, ) -from ._ipc import Channel as Channel +from .ipc import Channel as Channel from ._portal import Portal as Portal from ._runtime import Actor as Actor # from . import hilevel as hilevel diff --git a/tractor/_addr.py b/tractor/_addr.py new file mode 100644 index 00000000..d8d11227 --- /dev/null +++ b/tractor/_addr.py @@ -0,0 +1,282 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +from __future__ import annotations +from uuid import uuid4 +from typing import ( + Protocol, + ClassVar, + Type, + TYPE_CHECKING, +) + +from bidict import bidict +from trio import ( + SocketListener, +) + +from .log import get_logger +from ._state import ( + _def_tpt_proto, +) +from .ipc._tcp import TCPAddress +from .ipc._uds import UDSAddress + +if TYPE_CHECKING: + from ._runtime import Actor + +log = get_logger(__name__) + + +# TODO, maybe breakout the netns key to a struct? +# class NetNs(Struct)[str, int]: +# ... + +# TODO, can't we just use a type alias +# for this? namely just some `tuple[str, int, str, str]`? +# +# -[ ] would also just be simpler to keep this as SockAddr[tuple] +# or something, implying it's just a simple pair of values which can +# presumably be mapped to all transports? +# -[ ] `pydoc socket.socket.getsockname()` delivers a 4-tuple for +# ipv6 `(hostaddr, port, flowinfo, scope_id)`.. so how should we +# handle that? +# -[ ] as a further alternative to this wrap()/unwrap() approach we +# could just implement `enc/dec_hook()`s for the `Address`-types +# and just deal with our internal objs directly and always and +# leave it to the codec layer to figure out marshalling? +# |_ would mean only one spot to do the `.unwrap()` (which we may +# end up needing to call from the hook()s anyway?) +# -[x] rename to `UnwrappedAddress[Descriptor]` ?? 
+#  seems like the right name as per,
+#  https://www.geeksforgeeks.org/introduction-to-address-descriptor/
+#
+UnwrappedAddress = (
+    # tcp/udp/uds
+    tuple[
+        str,  # host/domain(tcp), filesys-dir(uds)
+        int|str,  # port/path(uds)
+    ]
+    # ?TODO? should we also include another 2 fields from
+    # our `Aid` msg such that we include the runtime `Actor.uid`
+    # of `.name` and `.uuid`?
+    # - would ensure uniqueness across entire net?
+    # - allows for easier runtime-level filtering of "actors by
+    #   service name"
+)
+
+
+# TODO, maybe rename to `SocketAddress`?
+class Address(Protocol):
+    proto_key: ClassVar[str]
+    unwrapped_type: ClassVar[UnwrappedAddress]
+
+    # TODO, i feel like an `.is_bound()` is a better thing to
+    # support?
+    # Like, what use does this have besides a noop, and if it's not
+    # valid why aren't we erroring on creation/use?
+    @property
+    def is_valid(self) -> bool:
+        ...
+
+    # TODO, maybe `.netns` is a better name?
+    @property
+    def namespace(self) -> tuple[str, int]|None:
+        '''
+        The if-available, OS-specific "network namespace" key.
+
+        '''
+        ...
+
+    @property
+    def bindspace(self) -> str:
+        '''
+        Deliver the socket address' "bindable space" from
+        a `socket.socket.bind()` and thus from the perspective of
+        the specific transport protocol domain.
+
+        I.e. for most (layer-4) network-socket protocols this is
+        normally the ipv4/6 address; for UDS this is normally
+        a filesystem (sub-directory).
+
+        For (distributed) network protocols this is normally the routing
+        layer's domain/(ip-)address, though it might also include a "network namespace"
+        key different than the default.
+
+        For local-host-only transports this is either an explicit
+        namespace (with types defined by the OS: netns, Cgroup, IPC,
+        pid, etc. on linux) or failing that the sub-directory in the
+        filesys in which socket/shm files are located *under*.
+
+        '''
+        ...
+
+    @classmethod
+    def from_addr(cls, addr: UnwrappedAddress) -> Address:
+        ...
+
+    def unwrap(self) -> UnwrappedAddress:
+        '''
+        Deliver the underlying minimum field set in
+        a primitive python data type-structure.
+        '''
+        ...
+
+    @classmethod
+    def get_random(
+        cls,
+        current_actor: Actor,
+        bindspace: str|None = None,
+    ) -> Address:
+        ...
+
+    # TODO, this should be something like a `.get_def_registrar_addr()`
+    # or similar since,
+    # - it should be a **host singleton** (not root/tree singleton)
+    # - we **only need this value** when one isn't provided to the
+    #   runtime at boot and we want to implicitly provide a host-wide
+    #   registrar.
+    # - each rooted-actor-tree should likely have its own
+    #   micro-registry (likely the root being it), also see
+    @classmethod
+    def get_root(cls) -> Address:
+        ...
+
+    def __repr__(self) -> str:
+        ...
+
+    def __eq__(self, other) -> bool:
+        ...
+
+    async def open_listener(
+        self,
+        **kwargs,
+    ) -> SocketListener:
+        ...
+
+    async def close_listener(self):
+        ...
+
+
+_address_types: bidict[str, Type[Address]] = {
+    'tcp': TCPAddress,
+    'uds': UDSAddress
+}
+
+
+# TODO! really these are discovery sys default addrs ONLY useful for
+# when none is provided to a root actor on first boot.
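+# (i.e. one unwrapped default address per transport key, eg.
+#  'tcp' -> `TCPAddress.get_root().unwrap()`, as delivered by
+#  `default_lo_addrs()` further below when no registry addr is
+#  provided by the caller.)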
+_default_lo_addrs: dict[ + str, + UnwrappedAddress +] = { + 'tcp': TCPAddress.get_root().unwrap(), + 'uds': UDSAddress.get_root().unwrap(), +} + + +def get_address_cls(name: str) -> Type[Address]: + return _address_types[name] + + +def is_wrapped_addr(addr: any) -> bool: + return type(addr) in _address_types.values() + + +def mk_uuid() -> str: + ''' + Encapsulate creation of a uuid4 as `str` as used + for creating `Actor.uid: tuple[str, str]` and/or + `.msg.types.Aid`. + + ''' + return str(uuid4()) + + +def wrap_address( + addr: UnwrappedAddress +) -> Address: + ''' + Wrap an `UnwrappedAddress` as an `Address`-type based + on matching builtin python data-structures which we adhoc + use for each. + + XXX NOTE, careful care must be placed to ensure + `UnwrappedAddress` cases are **definitely unique** otherwise the + wrong transport backend may be loaded and will break many + low-level things in our runtime in a not-fun-to-debug way! + + XD + + ''' + if is_wrapped_addr(addr): + return addr + + cls: Type|None = None + # if 'sock' in addr[0]: + # import pdbp; pdbp.set_trace() + match addr: + + # classic network socket-address as tuple/list + case ( + (str(), int()) + | + [str(), int()] + ): + cls = TCPAddress + + case ( + # (str()|Path(), str()|Path()), + # ^TODO? uhh why doesn't this work!? + + (_, filename) + ) if type(filename) is str: + cls = UDSAddress + + # likely an unset UDS or TCP reg address as defaulted in + # `_state._runtime_vars['_root_mailbox']` + # + # TODO? figure out when/if we even need this? + case ( + None + | + [None, None] + ): + cls: Type[Address] = get_address_cls(_def_tpt_proto) + addr: UnwrappedAddress = cls.get_root().unwrap() + + case _: + # import pdbp; pdbp.set_trace() + raise TypeError( + f'Can not wrap unwrapped-address ??\n' + f'type(addr): {type(addr)!r}\n' + f'addr: {addr!r}\n' + ) + + return cls.from_addr(addr) + + +def default_lo_addrs( + transports: list[str], +) -> list[Type[Address]]: + ''' + Return the default, host-singleton, registry address + for an input transport key set. 
+ + ''' + return [ + _default_lo_addrs[transport] + for transport in transports + ] diff --git a/tractor/_child.py b/tractor/_child.py index 4226ae90..d2f03f55 100644 --- a/tractor/_child.py +++ b/tractor/_child.py @@ -31,8 +31,12 @@ def parse_uid(arg): return str(name), str(uuid) # ensures str encoding def parse_ipaddr(arg): - host, port = literal_eval(arg) - return (str(host), int(port)) + try: + return literal_eval(arg) + + except (ValueError, SyntaxError): + # UDS: try to interpret as a straight up str + return arg if __name__ == "__main__": @@ -46,8 +50,8 @@ if __name__ == "__main__": args = parser.parse_args() subactor = Actor( - args.uid[0], - uid=args.uid[1], + name=args.uid[0], + uuid=args.uid[1], loglevel=args.loglevel, spawn_method="trio" ) diff --git a/tractor/_clustering.py b/tractor/_clustering.py index 46224d6f..dbb50304 100644 --- a/tractor/_clustering.py +++ b/tractor/_clustering.py @@ -55,10 +55,17 @@ async def open_actor_cluster( raise ValueError( 'Number of names is {len(names)} but count it {count}') - async with tractor.open_nursery( - **runtime_kwargs, - ) as an: - async with trio.open_nursery() as n: + async with ( + # tractor.trionics.collapse_eg(), + tractor.open_nursery( + **runtime_kwargs, + ) as an + ): + async with ( + # tractor.trionics.collapse_eg(), + trio.open_nursery() as tn, + tractor.trionics.maybe_raise_from_masking_exc() + ): uid = tractor.current_actor().uid async def _start(name: str) -> None: @@ -69,9 +76,8 @@ async def open_actor_cluster( ) for name in names: - n.start_soon(_start, name) + tn.start_soon(_start, name) assert len(portals) == count yield portals - await an.cancel(hard_kill=hard_kill) diff --git a/tractor/_context.py b/tractor/_context.py index 201e920a..9e277a88 100644 --- a/tractor/_context.py +++ b/tractor/_context.py @@ -89,7 +89,7 @@ from .msg import ( pretty_struct, _ops as msgops, ) -from ._ipc import ( +from .ipc import ( Channel, ) from ._streaming import ( @@ -101,11 +101,14 @@ from ._state import ( debug_mode, _ctxvar_Context, ) +from .trionics import ( + collapse_eg, +) # ------ - ------ if TYPE_CHECKING: from ._portal import Portal from ._runtime import Actor - from ._ipc import MsgTransport + from .ipc._transport import MsgTransport from .devx._frame_stack import ( CallerInfo, ) @@ -151,7 +154,7 @@ class Context: 2 cancel-scope-linked, communicating and parallel executing `Task`s. Contexts are allocated on each side of any task RPC-linked msg dialog, i.e. for every request to a remote - actor from a `Portal`. On the "callee" side a context is + actor from a `Portal`. On the "child" side a context is always allocated inside `._rpc._invoke()`. TODO: more detailed writeup on cancellation, error and @@ -219,8 +222,8 @@ class Context: # `._runtime.invoke()`. _remote_func_type: str | None = None - # NOTE: (for now) only set (a portal) on the caller side since - # the callee doesn't generally need a ref to one and should + # NOTE: (for now) only set (a portal) on the parent side since + # the child doesn't generally need a ref to one and should # normally need to explicitly ask for handle to its peer if # more the the `Context` is needed? _portal: Portal | None = None @@ -249,12 +252,12 @@ class Context: _outcome_msg: Return|Error|ContextCancelled = Unresolved # on a clean exit there should be a final value - # delivered from the far end "callee" task, so + # delivered from the far end "child" task, so # this value is only set on one side. 
# _result: Any | int = None _result: PayloadT|Unresolved = Unresolved - # if the local "caller" task errors this value is always set + # if the local "parent" task errors this value is always set # to the error that was captured in the # `Portal.open_context().__aexit__()` teardown block OR, in # 2 special cases when an (maybe) expected remote error @@ -290,9 +293,9 @@ class Context: # a `ContextCancelled` due to a call to `.cancel()` triggering # "graceful closure" on either side: # - `._runtime._invoke()` will check this flag before engaging - # the crash handler REPL in such cases where the "callee" + # the crash handler REPL in such cases where the "child" # raises the cancellation, - # - `.devx._debug.lock_stdio_for_peer()` will set it to `False` if + # - `.devx.debug.lock_stdio_for_peer()` will set it to `False` if # the global tty-lock has been configured to filter out some # actors from being able to acquire the debugger lock. _enter_debugger_on_cancel: bool = True @@ -304,8 +307,8 @@ class Context: _stream_opened: bool = False _stream: MsgStream|None = None - # caller of `Portal.open_context()` for - # logging purposes mostly + # the parent-task's calling-fn's frame-info, the frame above + # `Portal.open_context()`, for introspection/logging. _caller_info: CallerInfo|None = None # overrun handling machinery @@ -366,7 +369,7 @@ class Context: # f' ---\n' f' |_ipc: {self.dst_maddr}\n' # f' dst_maddr{ds}{self.dst_maddr}\n' - f" uid{ds}'{self.chan.uid}'\n" + f" uid{ds}'{self.chan.aid}'\n" f" cid{ds}'{self.cid}'\n" # f' ---\n' f'\n' @@ -526,11 +529,11 @@ class Context: ''' Exactly the value of `self._scope.cancelled_caught` (delegation) and should only be (able to be read as) - `True` for a `.side == "caller"` ctx wherein the + `True` for a `.side == "parent"` ctx wherein the `Portal.open_context()` block was exited due to a call to `._scope.cancel()` - which should only ocurr in 2 cases: - - a caller side calls `.cancel()`, the far side cancels + - a parent side calls `.cancel()`, the far side cancels and delivers back a `ContextCancelled` (making `.cancel_acked == True`) and `._scope.cancel()` is called by `._maybe_cancel_and_set_remote_error()` which @@ -539,20 +542,20 @@ class Context: => `._scope.cancelled_caught == True` by normal `trio` cs semantics. - - a caller side is delivered a `._remote_error: + - a parent side is delivered a `._remote_error: RemoteActorError` via `._deliver_msg()` and a transitive call to `_maybe_cancel_and_set_remote_error()` calls `._scope.cancel()` and that cancellation eventually results in `trio.Cancelled`(s) caught in the `.open_context()` handling around the @acm's `yield`. - Only as an FYI, in the "callee" side case it can also be + Only as an FYI, in the "child" side case it can also be set but never is readable by any task outside the RPC machinery in `._invoke()` since,: - - when a callee side calls `.cancel()`, `._scope.cancel()` + - when a child side calls `.cancel()`, `._scope.cancel()` is called immediately and handled specially inside `._invoke()` to raise a `ContextCancelled` which is then - sent to the caller side. + sent to the parent side. However, `._scope.cancelled_caught` can NEVER be accessed/read as `True` by any RPC invoked task since it @@ -663,7 +666,7 @@ class Context: when called/closed by actor local task(s). 
NOTEs: - - It is expected that the caller has previously unwrapped + - It is expected that the parent has previously unwrapped the remote error using a call to `unpack_error()` and provides that output exception value as the input `error` argument *here*. @@ -673,7 +676,7 @@ class Context: `Portal.open_context()` (ideally) we want to interrupt any ongoing local tasks operating within that `Context`'s cancel-scope so as to be notified ASAP of - the remote error and engage any caller handling (eg. + the remote error and engage any parent handling (eg. for cross-process task supervision). - In some cases we may want to raise the remote error @@ -740,6 +743,8 @@ class Context: # cancelled, NOT their reported canceller. IOW in the # latter case we're cancelled by someone else getting # cancelled. + # + # !TODO, switching to `Actor.aid` here! if (canc := error.canceller) == self._actor.uid: whom: str = 'us' self._canceller = canc @@ -859,19 +864,10 @@ class Context: @property def dst_maddr(self) -> str: chan: Channel = self.chan - dst_addr, dst_port = chan.raddr trans: MsgTransport = chan.transport # cid: str = self.cid # cid_head, cid_tail = cid[:6], cid[-6:] - return ( - f'/ipv4/{dst_addr}' - f'/{trans.name_key}/{dst_port}' - # f'/{self.chan.uid[0]}' - # f'/{self.cid}' - - # f'/cid={cid_head}..{cid_tail}' - # TODO: ? not use this ^ right ? - ) + return trans.maddr dmaddr = dst_maddr @@ -890,6 +886,11 @@ class Context: @property def repr_caller(self) -> str: + ''' + Render a "namespace-path" style representation of the calling + task-fn. + + ''' ci: CallerInfo|None = self._caller_info if ci: return ( @@ -903,7 +904,7 @@ class Context: def repr_api(self) -> str: return 'Portal.open_context()' - # TODO: use `.dev._frame_stack` scanning to find caller! + # TODO: use `.dev._frame_stack` scanning to find caller fn! # ci: CallerInfo|None = self._caller_info # if ci: # return ( @@ -938,7 +939,7 @@ class Context: => That is, an IPC `Context` (this) **does not** have the same semantics as a `trio.CancelScope`. - If the caller (who entered the `Portal.open_context()`) + If the parent (who entered the `Portal.open_context()`) desires that the internal block's cancel-scope be cancelled it should open its own `trio.CancelScope` and manage it as needed. @@ -949,15 +950,15 @@ class Context: self.cancel_called = True header: str = ( - f'Cancelling ctx from {side.upper()}-side\n' + f'Cancelling ctx from {side!r}-side\n' ) reminfo: str = ( # ' =>\n' # f'Context.cancel() => {self.chan.uid}\n' + f'\n' f'c)=> {self.chan.uid}\n' - # f'{self.chan.uid}\n' - f' |_ @{self.dst_maddr}\n' - f' >> {self.repr_rpc}\n' + f' |_[{self.dst_maddr}\n' + f' >> {self.repr_rpc}\n' # f' >> {self._nsf}() -> {codec}[dict]:\n\n' # TODO: pull msg-type from spec re #320 ) @@ -1010,7 +1011,6 @@ class Context: else: log.cancel( f'Timed out on cancel request of remote task?\n' - f'\n' f'{reminfo}' ) @@ -1021,7 +1021,7 @@ class Context: # `_invoke()` RPC task. # # NOTE: on this side we ALWAYS cancel the local scope - # since the caller expects a `ContextCancelled` to be sent + # since the parent expects a `ContextCancelled` to be sent # from `._runtime._invoke()` back to the other side. The # logic for catching the result of the below # `._scope.cancel()` is inside the `._runtime._invoke()` @@ -1078,9 +1078,25 @@ class Context: |RemoteActorError # stream overrun caused and ignored by us ): ''' - Maybe raise a remote error depending on the type of error - and *who* (i.e. which task from which actor) requested - a cancellation (if any). 
+ Maybe raise a remote error depending on the type of error and + *who*, i.e. which side of the task pair across actors, + requested a cancellation (if any). + + Depending on the input config-params suppress raising + certain remote excs: + + - if `remote_error: ContextCancelled` (ctxc) AND this side's + task is the "requester", it at somem point called + `Context.cancel()`, then the peer's ctxc is treated + as a "cancel ack". + + |_ this behaves exactly like how `trio.Nursery.cancel_scope` + absorbs any `BaseExceptionGroup[trio.Cancelled]` wherein the + owning parent task never will raise a `trio.Cancelled` + if `CancelScope.cancel_called == True`. + + - `remote_error: StreamOverrrun` (overrun) AND + `raise_overrun_from_self` is set. ''' __tracebackhide__: bool = hide_tb @@ -1122,18 +1138,19 @@ class Context: # for this ^, NO right? ) or ( - # NOTE: whenever this context is the cause of an - # overrun on the remote side (aka we sent msgs too - # fast that the remote task was overrun according - # to `MsgStream` buffer settings) AND the caller - # has requested to not raise overruns this side - # caused, we also silently absorb any remotely - # boxed `StreamOverrun`. This is mostly useful for - # supressing such faults during - # cancellation/error/final-result handling inside - # `msg._ops.drain_to_final_msg()` such that we do not - # raise such errors particularly in the case where + # NOTE: whenever this side is the cause of an + # overrun on the peer side, i.e. we sent msgs too + # fast and the peer task was overrun according + # to `MsgStream` buffer settings, AND this was + # called with `raise_overrun_from_self=True` (the + # default), silently absorb any `StreamOverrun`. + # + # XXX, this is namely useful for supressing such faults + # during cancellation/error/final-result handling inside + # `.msg._ops.drain_to_final_msg()` such that we do not + # raise during a cancellation-request, i.e. when # `._cancel_called == True`. + # not raise_overrun_from_self and isinstance(remote_error, RemoteActorError) and remote_error.boxed_type is StreamOverrun @@ -1177,8 +1194,8 @@ class Context: ) -> Any|Exception: ''' - From some (caller) side task, wait for and return the final - result from the remote (callee) side's task. + From some (parent) side task, wait for and return the final + result from the remote (child) side's task. This provides a mechanism for one task running in some actor to wait on another task at the other side, in some other actor, to terminate. @@ -1243,8 +1260,8 @@ class Context: # ?XXX, should already be set in `._deliver_msg()` right? if self._outcome_msg is not Unresolved: - # from .devx import _debug - # await _debug.pause() + # from .devx import debug + # await debug.pause() assert self._outcome_msg is outcome_msg else: self._outcome_msg = outcome_msg @@ -1474,6 +1491,12 @@ class Context: ): status = 'peer-cancelled' + case ( + Unresolved, + trio.Cancelled(), # any error-type + ) if self.canceller: + status = 'actor-cancelled' + # (remote) error condition case ( Unresolved, @@ -1587,7 +1610,7 @@ class Context: raise err # TODO: maybe a flag to by-pass encode op if already done - # here in caller? + # here in parent? await self.chan.send(started_msg) # set msg-related internal runtime-state @@ -1663,7 +1686,7 @@ class Context: XXX RULES XXX ------ - ------ - - NEVER raise remote errors from this method; a runtime task caller. + - NEVER raise remote errors from this method; a calling runtime-task. 
An error "delivered" to a ctx should always be raised by the corresponding local task operating on the `Portal`/`Context` APIs. @@ -1739,7 +1762,7 @@ class Context: else: report = ( - 'Queueing OVERRUN msg on caller task:\n\n' + 'Queueing OVERRUN msg on parent task:\n\n' + report ) log.debug(report) @@ -1935,12 +1958,12 @@ async def open_context_from_portal( IPC protocol. The yielded `tuple` is a pair delivering a `tractor.Context` - and any first value "sent" by the "callee" task via a call + and any first value "sent" by the "child" task via a call to `Context.started()`; this side of the - context does not unblock until the "callee" task calls + context does not unblock until the "child" task calls `.started()` in similar style to `trio.Nursery.start()`. - When the "callee" (side that is "called"/started by a call - to *this* method) returns, the caller side (this) unblocks + When the "child" (side that is "called"/started by a call + to *this* method) returns, the parent side (this) unblocks and any final value delivered from the other end can be retrieved using the `Contex.wait_for_result()` api. @@ -1953,7 +1976,7 @@ async def open_context_from_portal( __tracebackhide__: bool = hide_tb # denote this frame as a "runtime frame" for stack - # introspection where we report the caller code in logging + # introspection where we report the parent code in logging # and error message content. # NOTE: 2 bc of the wrapping `@acm` __runtimeframe__: int = 2 # noqa @@ -2012,13 +2035,11 @@ async def open_context_from_portal( # placeholder for any exception raised in the runtime # or by user tasks which cause this context's closure. scope_err: BaseException|None = None - ctxc_from_callee: ContextCancelled|None = None + ctxc_from_child: ContextCancelled|None = None try: async with ( - trio.open_nursery( - strict_exception_groups=False, - ) as tn, - + collapse_eg(), + trio.open_nursery() as tn, msgops.maybe_limit_plds( ctx=ctx, spec=ctx_meta.get('pld_spec'), @@ -2093,7 +2114,7 @@ async def open_context_from_portal( # that we can re-use it around the `yield` ^ here # or vice versa? # - # maybe TODO NOTE: between the caller exiting and + # maybe TODO NOTE: between the parent exiting and # arriving here the far end may have sent a ctxc-msg or # other error, so the quetion is whether we should check # for it here immediately and maybe raise so as to engage @@ -2159,16 +2180,16 @@ async def open_context_from_portal( # request in which case we DO let the error bubble to the # opener. # - # 2-THIS "caller" task somewhere invoked `Context.cancel()` - # and received a `ContextCanclled` from the "callee" + # 2-THIS "parent" task somewhere invoked `Context.cancel()` + # and received a `ContextCanclled` from the "child" # task, in which case we mask the `ContextCancelled` from - # bubbling to this "caller" (much like how `trio.Nursery` + # bubbling to this "parent" (much like how `trio.Nursery` # swallows any `trio.Cancelled` bubbled by a call to # `Nursery.cancel_scope.cancel()`) except ContextCancelled as ctxc: scope_err = ctxc ctx._local_error: BaseException = scope_err - ctxc_from_callee = ctxc + ctxc_from_child = ctxc # XXX TODO XXX: FIX THIS debug_mode BUGGGG!!! # using this code and then resuming the REPL will @@ -2179,7 +2200,7 @@ async def open_context_from_portal( # debugging the tractor-runtime itself using it's # own `.devx.` tooling! 
# - # await _debug.pause() + # await debug.pause() # CASE 2: context was cancelled by local task calling # `.cancel()`, we don't raise and the exit block should @@ -2205,11 +2226,11 @@ async def open_context_from_portal( # the above `._scope` can be cancelled due to: # 1. an explicit self cancel via `Context.cancel()` or # `Actor.cancel()`, - # 2. any "callee"-side remote error, possibly also a cancellation + # 2. any "child"-side remote error, possibly also a cancellation # request by some peer, - # 3. any "caller" (aka THIS scope's) local error raised in the above `yield` + # 3. any "parent" (aka THIS scope's) local error raised in the above `yield` except ( - # CASE 3: standard local error in this caller/yieldee + # CASE 3: standard local error in this parent/yieldee Exception, # CASES 1 & 2: can manifest as a `ctx._scope_nursery` @@ -2223,9 +2244,9 @@ async def open_context_from_portal( # any `Context._maybe_raise_remote_err()` call. # # 2.-`BaseExceptionGroup[ContextCancelled | RemoteActorError]` - # from any error delivered from the "callee" side + # from any error delivered from the "child" side # AND a group-exc is only raised if there was > 1 - # tasks started *here* in the "caller" / opener + # tasks started *here* in the "parent" / opener # block. If any one of those tasks calls # `.wait_for_result()` or `MsgStream.receive()` # `._maybe_raise_remote_err()` will be transitively @@ -2238,18 +2259,18 @@ async def open_context_from_portal( trio.Cancelled, # NOTE: NOT from inside the ctx._scope KeyboardInterrupt, - ) as caller_err: - scope_err = caller_err + ) as rent_err: + scope_err = rent_err ctx._local_error: BaseException = scope_err # XXX: ALWAYS request the context to CANCEL ON any ERROR. # NOTE: `Context.cancel()` is conversely NEVER CALLED in # the `ContextCancelled` "self cancellation absorbed" case # handled in the block above ^^^ !! - # await _debug.pause() + # await debug.pause() # log.cancel( match scope_err: - case trio.Cancelled: + case trio.Cancelled(): logmeth = log.cancel # XXX explicitly report on any non-graceful-taskc cases @@ -2257,15 +2278,15 @@ async def open_context_from_portal( logmeth = log.exception logmeth( - f'ctx {ctx.side!r}-side exited with {ctx.repr_outcome()}\n' + f'ctx {ctx.side!r}-side exited with {ctx.repr_outcome()!r}\n' ) if debug_mode(): - # async with _debug.acquire_debug_lock(portal.actor.uid): + # async with debug.acquire_debug_lock(portal.actor.uid): # pass # TODO: factor ^ into below for non-root cases? # - from .devx._debug import maybe_wait_for_debugger + from .devx.debug import maybe_wait_for_debugger was_acquired: bool = await maybe_wait_for_debugger( # header_msg=( # 'Delaying `ctx.cancel()` until debug lock ' @@ -2278,9 +2299,9 @@ async def open_context_from_portal( 'Calling `ctx.cancel()`!\n' ) - # we don't need to cancel the callee if it already + # we don't need to cancel the child if it already # told us it's cancelled ;p - if ctxc_from_callee is None: + if ctxc_from_child is None: try: await ctx.cancel() except ( @@ -2311,8 +2332,8 @@ async def open_context_from_portal( # via a call to # `Context._maybe_cancel_and_set_remote_error()`. # As per `Context._deliver_msg()`, that error IS - # ALWAYS SET any time "callee" side fails and causes "caller - # side" cancellation via a `ContextCancelled` here. + # ALWAYS SET any time "child" side fails and causes + # "parent side" cancellation via a `ContextCancelled` here. 
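# NOTE: the `collapse_eg()` + `trio.open_nursery()` pairing introduced in the
# hunks above replaces the old `strict_exception_groups=False` flag; the real
# helper lives in `tractor.trionics`, the sketch below only approximates the
# presumed "if only one `.exception` then just raise that" behaviour.
from contextlib import asynccontextmanager as acm


@acm
async def collapse_single_eg():
    '''
    Unwrap any 1-member `BaseExceptionGroup` into its sole leaf exc,
    leaving multi-error groups (and non-group errors) untouched.

    '''
    try:
        yield
    except BaseExceptionGroup as beg:
        exc: BaseException = beg
        # strip nested single-member groups down to the leaf
        while (
            isinstance(exc, BaseExceptionGroup)
            and
            len(exc.exceptions) == 1
        ):
            exc = exc.exceptions[0]
        raise exc

# usage, mirroring the hunks above:
#
#   async with (
#       collapse_single_eg(),
#       trio.open_nursery() as tn,
#   ):
#       ...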
try: result_or_err: Exception|Any = await ctx.wait_for_result() except BaseException as berr: @@ -2328,8 +2349,8 @@ async def open_context_from_portal( raise # yes this worx! - # from .devx import _debug - # await _debug.pause() + # from .devx import debug + # await debug.pause() # an exception type boxed in a `RemoteActorError` # is returned (meaning it was obvi not raised) @@ -2348,7 +2369,7 @@ async def open_context_from_portal( ) case (None, _): log.runtime( - 'Context returned final result from callee task:\n' + 'Context returned final result from child task:\n' f'<= peer: {uid}\n' f' |_ {nsf}()\n\n' @@ -2364,7 +2385,7 @@ async def open_context_from_portal( # where the root is waiting on the lock to clear but the # child has already cleared it and clobbered IPC. if debug_mode(): - from .devx._debug import maybe_wait_for_debugger + from .devx.debug import maybe_wait_for_debugger await maybe_wait_for_debugger() # though it should be impossible for any tasks @@ -2443,7 +2464,7 @@ async def open_context_from_portal( ) # TODO: should we add a `._cancel_req_received` - # flag to determine if the callee manually called + # flag to determine if the child manually called # `ctx.cancel()`? # -[ ] going to need a cid check no? @@ -2499,7 +2520,7 @@ def mk_context( recv_chan: trio.MemoryReceiveChannel send_chan, recv_chan = trio.open_memory_channel(msg_buffer_size) - # TODO: only scan caller-info if log level so high! + # TODO: only scan parent-info if log level so high! from .devx._frame_stack import find_caller_info caller_info: CallerInfo|None = find_caller_info() diff --git a/tractor/_discovery.py b/tractor/_discovery.py index a681c63b..a332ab73 100644 --- a/tractor/_discovery.py +++ b/tractor/_discovery.py @@ -28,8 +28,16 @@ from typing import ( from contextlib import asynccontextmanager as acm from tractor.log import get_logger -from .trionics import gather_contexts -from ._ipc import _connect_chan, Channel +from .trionics import ( + gather_contexts, + collapse_eg, +) +from .ipc import _connect_chan, Channel +from ._addr import ( + UnwrappedAddress, + Address, + wrap_address +) from ._portal import ( Portal, open_portal, @@ -38,6 +46,7 @@ from ._portal import ( from ._state import ( current_actor, _runtime_vars, + _def_tpt_proto, ) if TYPE_CHECKING: @@ -49,9 +58,7 @@ log = get_logger(__name__) @acm async def get_registry( - host: str, - port: int, - + addr: UnwrappedAddress|None = None, ) -> AsyncGenerator[ Portal | LocalPortal | None, None, @@ -69,19 +76,20 @@ async def get_registry( # (likely a re-entrant call from the arbiter actor) yield LocalPortal( actor, - Channel((host, port)) + Channel(transport=None) + # ^XXX, we DO NOT actually provide nor connect an + # underlying transport since this is merely an API shim. ) else: # TODO: try to look pre-existing connection from - # `Actor._peers` and use it instead? + # `Server._peers` and use it instead? async with ( - _connect_chan(host, port) as chan, + _connect_chan(addr) as chan, open_portal(chan) as regstr_ptl, ): yield regstr_ptl - @acm async def get_root( **kwargs, @@ -89,11 +97,10 @@ async def get_root( # TODO: rename mailbox to `_root_maddr` when we finally # add and impl libp2p multi-addrs? 
- host, port = _runtime_vars['_root_mailbox'] - assert host is not None + addr = _runtime_vars['_root_mailbox'] async with ( - _connect_chan(host, port) as chan, + _connect_chan(addr) as chan, open_portal(chan, **kwargs) as portal, ): yield portal @@ -106,17 +113,23 @@ def get_peer_by_name( ) -> list[Channel]|None: # at least 1 ''' Scan for an existing connection (set) to a named actor - and return any channels from `Actor._peers`. + and return any channels from `Server._peers: dict`. This is an optimization method over querying the registrar for the same info. ''' actor: Actor = current_actor() - to_scan: dict[tuple, list[Channel]] = actor._peers.copy() - pchan: Channel|None = actor._parent_chan - if pchan: - to_scan[pchan.uid].append(pchan) + to_scan: dict[tuple, list[Channel]] = actor.ipc_server._peers.copy() + + # TODO: is this ever needed? creates a duplicate channel on actor._peers + # when multiple find_actor calls are made to same actor from a single ctx + # which causes actor exit to hang waiting forever on + # `actor._no_more_peers.wait()` in `_runtime.async_main` + + # pchan: Channel|None = actor._parent_chan + # if pchan and pchan.uid not in to_scan: + # to_scan[pchan.uid].append(pchan) for aid, chans in to_scan.items(): _, peer_name = aid @@ -134,10 +147,10 @@ def get_peer_by_name( @acm async def query_actor( name: str, - regaddr: tuple[str, int]|None = None, + regaddr: UnwrappedAddress|None = None, ) -> AsyncGenerator[ - tuple[str, int]|None, + UnwrappedAddress|None, None, ]: ''' @@ -163,31 +176,31 @@ async def query_actor( return reg_portal: Portal - regaddr: tuple[str, int] = regaddr or actor.reg_addrs[0] - async with get_registry(*regaddr) as reg_portal: + regaddr: Address = wrap_address(regaddr) or actor.reg_addrs[0] + async with get_registry(regaddr) as reg_portal: # TODO: return portals to all available actors - for now # just the last one that registered - sockaddr: tuple[str, int] = await reg_portal.run_from_ns( + addr: UnwrappedAddress = await reg_portal.run_from_ns( 'self', 'find_actor', name=name, ) - yield sockaddr + yield addr @acm async def maybe_open_portal( - addr: tuple[str, int], + addr: UnwrappedAddress, name: str, ): async with query_actor( name=name, regaddr=addr, - ) as sockaddr: + ) as addr: pass - if sockaddr: - async with _connect_chan(*sockaddr) as chan: + if addr: + async with _connect_chan(addr) as chan: async with open_portal(chan) as portal: yield portal else: @@ -197,7 +210,8 @@ async def maybe_open_portal( @acm async def find_actor( name: str, - registry_addrs: list[tuple[str, int]]|None = None, + registry_addrs: list[UnwrappedAddress]|None = None, + enable_transports: list[str] = [_def_tpt_proto], only_first: bool = True, raise_on_none: bool = False, @@ -224,15 +238,15 @@ async def find_actor( # XXX NOTE: make sure to dynamically read the value on # every call since something may change it globally (eg. # like in our discovery test suite)! - from . 
import _root + from ._addr import default_lo_addrs registry_addrs = ( _runtime_vars['_registry_addrs'] or - _root._default_lo_addrs + default_lo_addrs(enable_transports) ) maybe_portals: list[ - AsyncContextManager[tuple[str, int]] + AsyncContextManager[UnwrappedAddress] ] = list( maybe_open_portal( addr=addr, @@ -241,9 +255,12 @@ async def find_actor( for addr in registry_addrs ) portals: list[Portal] - async with gather_contexts( - mngrs=maybe_portals, - ) as portals: + async with ( + collapse_eg(), + gather_contexts( + mngrs=maybe_portals, + ) as portals, + ): # log.runtime( # 'Gathered portals:\n' # f'{portals}' @@ -274,7 +291,7 @@ async def find_actor( @acm async def wait_for_actor( name: str, - registry_addr: tuple[str, int] | None = None, + registry_addr: UnwrappedAddress | None = None, ) -> AsyncGenerator[Portal, None]: ''' @@ -291,7 +308,7 @@ async def wait_for_actor( yield peer_portal return - regaddr: tuple[str, int] = ( + regaddr: UnwrappedAddress = ( registry_addr or actor.reg_addrs[0] @@ -299,8 +316,8 @@ async def wait_for_actor( # TODO: use `.trionics.gather_contexts()` like # above in `find_actor()` as well? reg_portal: Portal - async with get_registry(*regaddr) as reg_portal: - sockaddrs = await reg_portal.run_from_ns( + async with get_registry(regaddr) as reg_portal: + addrs = await reg_portal.run_from_ns( 'self', 'wait_for_actor', name=name, @@ -308,8 +325,8 @@ async def wait_for_actor( # get latest registered addr by default? # TODO: offer multi-portal yields in multi-homed case? - sockaddr: tuple[str, int] = sockaddrs[-1] + addr: UnwrappedAddress = addrs[-1] - async with _connect_chan(*sockaddr) as chan: + async with _connect_chan(addr) as chan: async with open_portal(chan) as portal: yield portal diff --git a/tractor/_entry.py b/tractor/_entry.py index 8156d25f..68e72501 100644 --- a/tractor/_entry.py +++ b/tractor/_entry.py @@ -21,8 +21,7 @@ Sub-process entry points. from __future__ import annotations from functools import partial import multiprocessing as mp -import os -import textwrap +# import os from typing import ( Any, TYPE_CHECKING, @@ -35,8 +34,13 @@ from .log import ( get_logger, ) from . import _state -from .devx import _debug +from .devx import ( + _frame_stack, + pformat, +) +# from .msg import pretty_struct from .to_asyncio import run_as_asyncio_guest +from ._addr import UnwrappedAddress from ._runtime import ( async_main, Actor, @@ -52,10 +56,10 @@ log = get_logger(__name__) def _mp_main( actor: Actor, - accept_addrs: list[tuple[str, int]], + accept_addrs: list[UnwrappedAddress], forkserver_info: tuple[Any, Any, Any, Any, Any], start_method: SpawnMethodKey, - parent_addr: tuple[str, int] | None = None, + parent_addr: UnwrappedAddress | None = None, infect_asyncio: bool = False, ) -> None: @@ -102,111 +106,10 @@ def _mp_main( ) -# TODO: move this func to some kinda `.devx._conc_lang.py` eventually -# as we work out our multi-domain state-flow-syntax! -def nest_from_op( - input_op: str, - # - # ?TODO? an idea for a syntax to the state of concurrent systems - # as a "3-domain" (execution, scope, storage) model and using - # a minimal ascii/utf-8 operator-set. - # - # try not to take any of this seriously yet XD - # - # > is a "play operator" indicating (CPU bound) - # exec/work/ops required at the "lowest level computing" - # - # execution primititves (tasks, threads, actors..) denote their - # lifetime with '(' and ')' since parentheses normally are used - # in many langs to denote function calls. 
- # - # starting = ( - # >( opening/starting; beginning of the thread-of-exec (toe?) - # (> opened/started, (finished spawning toe) - # |_ repr of toe, in py these look like - # - # >) closing/exiting/stopping, - # )> closed/exited/stopped, - # |_ - # [OR <), )< ?? ] - # - # ending = ) - # >c) cancelling to close/exit - # c)> cancelled (caused close), OR? - # |_ - # OR maybe "x) erroring to eventuall exit - # x)> errored and terminated - # |_ - # - # scopes: supers/nurseries, IPC-ctxs, sessions, perms, etc. - # >{ opening - # {> opened - # }> closed - # >} closing - # - # storage: like queues, shm-buffers, files, etc.. - # >[ opening - # [> opened - # |_ - # - # >] closing - # ]> closed - - # IPC ops: channels, transports, msging - # => req msg - # <= resp msg - # <=> 2-way streaming (of msgs) - # <- recv 1 msg - # -> send 1 msg - # - # TODO: still not sure on R/L-HS approach..? - # =>( send-req to exec start (task, actor, thread..) - # (<= recv-req to ^ - # - # (<= recv-req ^ - # <=( recv-resp opened remote exec primitive - # <=) recv-resp closed - # - # )<=c req to stop due to cancel - # c=>) req to stop due to cancel - # - # =>{ recv-req to open - # <={ send-status that it closed - - tree_str: str, - - # NOTE: so move back-from-the-left of the `input_op` by - # this amount. - back_from_op: int = 0, -) -> str: - ''' - Depth-increment the input (presumably hierarchy/supervision) - input "tree string" below the provided `input_op` execution - operator, so injecting a `"\n|_{input_op}\n"`and indenting the - `tree_str` to nest content aligned with the ops last char. - - ''' - return ( - f'{input_op}\n' - + - textwrap.indent( - tree_str, - prefix=( - len(input_op) - - - (back_from_op + 1) - ) * ' ', - ) - ) - - def _trio_main( actor: Actor, *, - parent_addr: tuple[str, int] | None = None, + parent_addr: UnwrappedAddress|None = None, infect_asyncio: bool = False, ) -> None: @@ -214,7 +117,7 @@ def _trio_main( Entry point for a `trio_run_in_process` subactor. 
''' - _debug.hide_runtime_frames() + _frame_stack.hide_runtime_frames() _state._current_actor = actor trio_main = partial( @@ -225,30 +128,23 @@ def _trio_main( if actor.loglevel is not None: get_console_log(actor.loglevel) - actor_info: str = ( - f'|_{actor}\n' - f' uid: {actor.uid}\n' - f' pid: {os.getpid()}\n' - f' parent_addr: {parent_addr}\n' - f' loglevel: {actor.loglevel}\n' - ) log.info( - 'Starting new `trio` subactor:\n' + f'Starting `trio` subactor from parent @ ' + f'{parent_addr}\n' + - nest_from_op( + pformat.nest_from_op( input_op='>(', # see syntax ideas above - tree_str=actor_info, - back_from_op=2, # since "complete" + text=f'{actor}', ) ) logmeth = log.info exit_status: str = ( 'Subactor exited\n' + - nest_from_op( + pformat.nest_from_op( input_op=')>', # like a "closed-to-play"-icon from super perspective - tree_str=actor_info, - back_from_op=1, + text=f'{actor}', + nest_indent=1, ) ) try: @@ -263,9 +159,9 @@ def _trio_main( exit_status: str = ( 'Actor received KBI (aka an OS-cancel)\n' + - nest_from_op( + pformat.nest_from_op( input_op='c)>', # closed due to cancel (see above) - tree_str=actor_info, + text=f'{actor}', ) ) except BaseException as err: @@ -273,9 +169,9 @@ def _trio_main( exit_status: str = ( 'Main actor task exited due to crash?\n' + - nest_from_op( + pformat.nest_from_op( input_op='x)>', # closed by error - tree_str=actor_info, + text=f'{actor}', ) ) # NOTE since we raise a tb will already be shown on the diff --git a/tractor/_exceptions.py b/tractor/_exceptions.py index f9e18e18..418accc3 100644 --- a/tractor/_exceptions.py +++ b/tractor/_exceptions.py @@ -23,7 +23,6 @@ import builtins import importlib from pprint import pformat from pdb import bdb -import sys from types import ( TracebackType, ) @@ -65,15 +64,29 @@ if TYPE_CHECKING: from ._context import Context from .log import StackLevelAdapter from ._stream import MsgStream - from ._ipc import Channel + from .ipc import Channel log = get_logger('tractor') _this_mod = importlib.import_module(__name__) -class ActorFailure(Exception): - "General actor failure" +class RuntimeFailure(RuntimeError): + ''' + General `Actor`-runtime failure due to, + + - a bad runtime-env, + - falied spawning (bad input to process), + - API usage. + + ''' + + +class ActorFailure(RuntimeFailure): + ''' + `Actor` failed to boot before/after spawn + + ''' class InternalError(RuntimeError): @@ -126,6 +139,12 @@ class TrioTaskExited(Exception): ''' +class DebugRequestError(RuntimeError): + ''' + Failed to request stdio lock from root actor! + + ''' + # NOTE: more or less should be close to these: # 'boxed_type', # 'src_type', @@ -191,6 +210,8 @@ def get_err_type(type_name: str) -> BaseException|None: ): return type_ref + return None + def pack_from_raise( local_err: ( @@ -521,7 +542,6 @@ class RemoteActorError(Exception): if val: _repr += f'{key}={val_str}{end_char}' - return _repr def reprol(self) -> str: @@ -600,56 +620,9 @@ class RemoteActorError(Exception): the type name is already implicitly shown by python). ''' - header: str = '' - body: str = '' - message: str = '' - - # XXX when the currently raised exception is this instance, - # we do not ever use the "type header" style repr. 
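# NOTE: the big `nest_from_op()` helper deleted above now lives in
# `tractor.devx.pformat` (see the `pformat.nest_from_op(input_op=..,
# text=..)` call sites in `_trio_main()`). A stdlib-only sketch of the same
# "indent a tree-repr under an operator glyph" idea; the parameter names are
# illustrative, not the real signature.
import textwrap


def nest_under_op(
    input_op: str,   # eg. '>(' for "starting", ')>' for "exited"
    text: str,       # the (possibly multi-line) repr to nest
    nest_indent: int = 0,
) -> str:
    prefix: str = ' ' * (len(input_op) + nest_indent)
    return (
        f'{input_op}\n'
        +
        textwrap.indent(text, prefix=prefix)
    )


print(
    nest_under_op(
        input_op='>(',
        text=(
            'Actor(name=spawner, pid=1234)\n'
            ' |_peers: 2\n'
        ),
    )
)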
- is_being_raised: bool = False - if ( - (exc := sys.exception()) - and - exc is self - ): - is_being_raised: bool = True - - with_type_header: bool = ( - with_type_header - and - not is_being_raised - ) - - # style - if with_type_header: - header: str = f'<{type(self).__name__}(' - - if message := self._message: - - # split off the first line so, if needed, it isn't - # indented the same like the "boxed content" which - # since there is no `.tb_str` is just the `.message`. - lines: list[str] = message.splitlines() - first: str = lines[0] - message: str = message.removeprefix(first) - - # with a type-style header we, - # - have no special message "first line" extraction/handling - # - place the message a space in from the header: - # `MsgTypeError( ..` - # ^-here - # - indent the `.message` inside the type body. - if with_type_header: - first = f' {first} )>' - - message: str = textwrap.indent( - message, - prefix=' '*2, - ) - message: str = first + message - # IFF there is an embedded traceback-str we always # draw the ascii-box around it. + body: str = '' if tb_str := self.tb_str: fields: str = self._mk_fields_str( _body_fields @@ -670,21 +643,15 @@ class RemoteActorError(Exception): boxer_header=self.relay_uid, ) - tail = '' - if ( - with_type_header - and not message - ): - tail: str = '>' - - return ( - header - + - message - + - f'{body}' - + - tail + # !TODO, it'd be nice to import these top level without + # cycles! + from tractor.devx.pformat import ( + pformat_exc, + ) + return pformat_exc( + exc=self, + with_type_header=with_type_header, + body=body, ) __repr__ = pformat @@ -962,7 +929,7 @@ class StreamOverrun( ''' -class TransportClosed(trio.BrokenResourceError): +class TransportClosed(Exception): ''' IPC transport (protocol) connection was closed or broke and indicates that the wrapping communication `Channel` can no longer @@ -973,24 +940,39 @@ class TransportClosed(trio.BrokenResourceError): self, message: str, loglevel: str = 'transport', - cause: BaseException|None = None, + src_exc: Exception|None = None, raise_on_report: bool = False, ) -> None: self.message: str = message - self._loglevel = loglevel + self._loglevel: str = loglevel super().__init__(message) - if cause is not None: - self.__cause__ = cause + self._src_exc = src_exc + # set the cause manually if not already set by python + if ( + src_exc is not None + and + not self.__cause__ + ): + self.__cause__ = src_exc # flag to toggle whether the msg loop should raise # the exc in its `TransportClosed` handler block. self._raise_on_report = raise_on_report + @property + def src_exc(self) -> Exception: + return ( + self.__cause__ + or + self._src_exc + ) + def report_n_maybe_raise( self, message: str|None = None, + hide_tb: bool = True, ) -> None: ''' @@ -998,9 +980,10 @@ class TransportClosed(trio.BrokenResourceError): for this error. ''' + __tracebackhide__: bool = hide_tb message: str = message or self.message # when a cause is set, slap it onto the log emission. 
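# NOTE: why `TransportClosed.__init__()` above sets `.__cause__` by hand:
# python only fills `__cause__` automatically on a `raise .. from ..`; when
# the instance is merely constructed (eg. for a later report) the source exc
# has to be attached manually. Tiny stdlib-only demo:
def demo_cause_chaining() -> None:
    src = ConnectionResetError('peer hung up')

    # 1) `raise .. from ..` => python sets `__cause__` for us
    try:
        raise RuntimeError('tpt closed') from src
    except RuntimeError as exc:
        assert exc.__cause__ is src

    # 2) plain construction => `__cause__` stays unset unless assigned
    exc = RuntimeError('tpt closed')
    assert exc.__cause__ is None
    exc.__cause__ = src  # the manual fallback used above
    assert exc.__cause__ is src


demo_cause_chaining()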
- if cause := self.__cause__: + if cause := self.src_exc: cause_tb_str: str = ''.join( traceback.format_tb(cause.__traceback__) ) @@ -1009,13 +992,86 @@ class TransportClosed(trio.BrokenResourceError): f' {cause}\n' # exc repr ) - getattr(log, self._loglevel)(message) + getattr( + log, + self._loglevel + )(message) # some errors we want to blow up from # inside the RPC msg loop if self._raise_on_report: raise self from cause + @classmethod + def repr_src_exc( + self, + src_exc: Exception|None = None, + ) -> str: + + if src_exc is None: + return '' + + src_msg: tuple[str] = src_exc.args + src_exc_repr: str = ( + f'{type(src_exc).__name__}[ {src_msg} ]' + ) + return src_exc_repr + + def pformat(self) -> str: + from tractor.devx.pformat import ( + pformat_exc, + ) + return pformat_exc( + exc=self, + ) + + # delegate to `str`-ified pformat + __repr__ = pformat + + @classmethod + def from_src_exc( + cls, + src_exc: ( + Exception| + trio.ClosedResource| + trio.BrokenResourceError + ), + message: str, + body: str = '', + **init_kws, + ) -> TransportClosed: + ''' + Convenience constructor for creation from an underlying + `trio`-sourced async-resource/chan/stream error. + + Embeds the original `src_exc`'s repr within the + `Exception.args` via a first-line-in-`.message`-put-in-header + pre-processing and allows inserting additional content beyond + the main message via a `body: str`. + + ''' + repr_src_exc: str = cls.repr_src_exc( + src_exc, + ) + next_line: str = f' src_exc: {repr_src_exc}\n' + if body: + body: str = textwrap.indent( + body, + prefix=' '*2, + ) + + return TransportClosed( + message=( + message + + + next_line + + + body + ), + src_exc=src_exc, + **init_kws, + ) + class NoResult(RuntimeError): "No final result is expected for this actor" @@ -1190,55 +1246,6 @@ def unpack_error( return exc -def is_multi_cancelled( - exc: BaseException|BaseExceptionGroup, - - ignore_nested: set[BaseException] = set(), - -) -> bool|BaseExceptionGroup: - ''' - Predicate to determine if an `BaseExceptionGroup` only contains - some (maybe nested) set of sub-grouped exceptions (like only - `trio.Cancelled`s which get swallowed silently by default) and is - thus the result of "gracefully cancelling" a collection of - sub-tasks (or other conc primitives) and receiving a "cancelled - ACK" from each after termination. - - Docs: - ---- - - https://docs.python.org/3/library/exceptions.html#exception-groups - - https://docs.python.org/3/library/exceptions.html#BaseExceptionGroup.subgroup - - ''' - - if ( - not ignore_nested - or - trio.Cancelled in ignore_nested - # XXX always count-in `trio`'s native signal - ): - ignore_nested.update({trio.Cancelled}) - - if isinstance(exc, BaseExceptionGroup): - matched_exc: BaseExceptionGroup|None = exc.subgroup( - tuple(ignore_nested), - - # TODO, complain about why not allowed XD - # condition=tuple(ignore_nested), - ) - if matched_exc is not None: - return matched_exc - - # NOTE, IFF no excs types match (throughout the error-tree) - # -> return `False`, OW return the matched sub-eg. - # - # IOW, for the inverse of ^ for the purpose of - # maybe-enter-REPL--logic: "only debug when the err-tree contains - # at least one exc-type NOT in `ignore_nested`" ; i.e. the case where - # we fallthrough and return `False` here. 
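# NOTE: the `is_multi_cancelled()` predicate deleted above is re-homed under
# `tractor.trionics` (see the `_root.py` import hunk further below). Its core
# mechanism is `BaseExceptionGroup.subgroup()`; a stdlib-only sketch of that
# matching behaviour (py>=3.11), with an illustrative fn name:
def maybe_match_subgroup(
    eg: BaseException,
    ignore: tuple[type[BaseException], ...],
) -> BaseExceptionGroup|bool:
    '''
    Return the sub-group of `eg` whose leaves match `ignore`,
    or `False` when `eg` is not a group or nothing matches.

    '''
    if isinstance(eg, BaseExceptionGroup):
        matched = eg.subgroup(ignore)
        if matched is not None:
            return matched

    return False


eg = ExceptionGroup(
    'child tasks errored',
    [ValueError('a'), KeyError('b')],
)
matched = maybe_match_subgroup(eg, (ValueError,))
assert matched and len(matched.exceptions) == 1  # just the `ValueError`
assert maybe_match_subgroup(eg, (OSError,)) is False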
- return False - - def _raise_from_unexpected_msg( ctx: Context, msg: MsgType, diff --git a/tractor/_ipc.py b/tractor/_ipc.py deleted file mode 100644 index 83186147..00000000 --- a/tractor/_ipc.py +++ /dev/null @@ -1,820 +0,0 @@ -# tractor: structured concurrent "actors". -# Copyright 2018-eternity Tyler Goodlet. - -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. - -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . - -""" -Inter-process comms abstractions - -""" -from __future__ import annotations -from collections.abc import ( - AsyncGenerator, - AsyncIterator, -) -from contextlib import ( - asynccontextmanager as acm, - contextmanager as cm, -) -import platform -from pprint import pformat -import struct -import typing -from typing import ( - Any, - Callable, - runtime_checkable, - Protocol, - Type, - TypeVar, -) - -import msgspec -from tricycle import BufferedReceiveStream -import trio - -from tractor.log import get_logger -from tractor._exceptions import ( - MsgTypeError, - pack_from_raise, - TransportClosed, - _mk_send_mte, - _mk_recv_mte, -) -from tractor.msg import ( - _ctxvar_MsgCodec, - # _codec, XXX see `self._codec` sanity/debug checks - MsgCodec, - types as msgtypes, - pretty_struct, -) - -log = get_logger(__name__) - -_is_windows = platform.system() == 'Windows' - - -def get_stream_addrs( - stream: trio.SocketStream -) -> tuple[ - tuple[str, int], # local - tuple[str, int], # remote -]: - ''' - Return the `trio` streaming transport prot's socket-addrs for - both the local and remote sides as a pair. - - ''' - # rn, should both be IP sockets - lsockname = stream.socket.getsockname() - rsockname = stream.socket.getpeername() - return ( - tuple(lsockname[:2]), - tuple(rsockname[:2]), - ) - - -# from tractor.msg.types import MsgType -# ?TODO? this should be our `Union[*msgtypes.__spec__]` alias now right..? -# => BLEH, except can't bc prots must inherit typevar or param-spec -# vars.. -MsgType = TypeVar('MsgType') - - -# TODO: break up this mod into a subpkg so we can start adding new -# backends and move this type stuff into a dedicated file.. Bo -# -@runtime_checkable -class MsgTransport(Protocol[MsgType]): -# -# ^-TODO-^ consider using a generic def and indexing with our -# eventual msg definition/types? -# - https://docs.python.org/3/library/typing.html#typing.Protocol - - stream: trio.SocketStream - drained: list[MsgType] - - def __init__(self, stream: trio.SocketStream) -> None: - ... - - # XXX: should this instead be called `.sendall()`? - async def send(self, msg: MsgType) -> None: - ... - - async def recv(self) -> MsgType: - ... - - def __aiter__(self) -> MsgType: - ... - - def connected(self) -> bool: - ... - - # defining this sync otherwise it causes a mypy error because it - # can't figure out it's a generator i guess?..? - def drain(self) -> AsyncIterator[dict]: - ... - - @property - def laddr(self) -> tuple[str, int]: - ... - - @property - def raddr(self) -> tuple[str, int]: - ... - - -# TODO: typing oddity.. 
not sure why we have to inherit here, but it -# seems to be an issue with `get_msg_transport()` returning -# a `Type[Protocol]`; probably should make a `mypy` issue? -class MsgpackTCPStream(MsgTransport): - ''' - A ``trio.SocketStream`` delivering ``msgpack`` formatted data - using the ``msgspec`` codec lib. - - ''' - layer_key: int = 4 - name_key: str = 'tcp' - - # TODO: better naming for this? - # -[ ] check how libp2p does naming for such things? - codec_key: str = 'msgpack' - - def __init__( - self, - stream: trio.SocketStream, - prefix_size: int = 4, - - # XXX optionally provided codec pair for `msgspec`: - # https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types - # - # TODO: define this as a `Codec` struct which can be - # overriden dynamically by the application/runtime? - codec: tuple[ - Callable[[Any], Any]|None, # coder - Callable[[type, Any], Any]|None, # decoder - ]|None = None, - - ) -> None: - - self.stream = stream - assert self.stream.socket - - # should both be IP sockets - self._laddr, self._raddr = get_stream_addrs(stream) - - # create read loop instance - self._aiter_pkts = self._iter_packets() - self._send_lock = trio.StrictFIFOLock() - - # public i guess? - self.drained: list[dict] = [] - - self.recv_stream = BufferedReceiveStream( - transport_stream=stream - ) - self.prefix_size = prefix_size - - # allow for custom IPC msg interchange format - # dynamic override Bo - self._task = trio.lowlevel.current_task() - - # XXX for ctxvar debug only! - # self._codec: MsgCodec = ( - # codec - # or - # _codec._ctxvar_MsgCodec.get() - # ) - - async def _iter_packets(self) -> AsyncGenerator[dict, None]: - ''' - Yield `bytes`-blob decoded packets from the underlying TCP - stream using the current task's `MsgCodec`. - - This is a streaming routine implemented as an async generator - func (which was the original design, but could be changed?) - and is allocated by a `.__call__()` inside `.__init__()` where - it is assigned to the `._aiter_pkts` attr. - - ''' - decodes_failed: int = 0 - - while True: - try: - header: bytes = await self.recv_stream.receive_exactly(4) - except ( - ValueError, - ConnectionResetError, - - # not sure entirely why we need this but without it we - # seem to be getting racy failures here on - # arbiter/registry name subs.. - trio.BrokenResourceError, - - ) as trans_err: - - loglevel = 'transport' - match trans_err: - # case ( - # ConnectionResetError() - # ): - # loglevel = 'transport' - - # peer actor (graceful??) TCP EOF but `tricycle` - # seems to raise a 0-bytes-read? - case ValueError() if ( - 'unclean EOF' in trans_err.args[0] - ): - pass - - # peer actor (task) prolly shutdown quickly due - # to cancellation - case trio.BrokenResourceError() if ( - 'Connection reset by peer' in trans_err.args[0] - ): - pass - - # unless the disconnect condition falls under "a - # normal operation breakage" we usualy console warn - # about it. - case _: - loglevel: str = 'warning' - - - raise TransportClosed( - message=( - f'IPC transport already closed by peer\n' - f'x]> {type(trans_err)}\n' - f' |_{self}\n' - ), - loglevel=loglevel, - ) from trans_err - - # XXX definitely can happen if transport is closed - # manually by another `trio.lowlevel.Task` in the - # same actor; we use this in some simulated fault - # testing for ex, but generally should never happen - # under normal operation! - # - # NOTE: as such we always re-raise this error from the - # RPC msg loop! 
- except trio.ClosedResourceError as closure_err: - raise TransportClosed( - message=( - f'IPC transport already manually closed locally?\n' - f'x]> {type(closure_err)} \n' - f' |_{self}\n' - ), - loglevel='error', - raise_on_report=( - closure_err.args[0] == 'another task closed this fd' - or - closure_err.args[0] in ['another task closed this fd'] - ), - ) from closure_err - - # graceful TCP EOF disconnect - if header == b'': - raise TransportClosed( - message=( - f'IPC transport already gracefully closed\n' - f']>\n' - f' |_{self}\n' - ), - loglevel='transport', - # cause=??? # handy or no? - ) - - size: int - size, = struct.unpack(" None: - ''' - Send a msgpack encoded py-object-blob-as-msg over TCP. - - If `strict_types == True` then a `MsgTypeError` will be raised on any - invalid msg type - - ''' - __tracebackhide__: bool = hide_tb - - # XXX see `trio._sync.AsyncContextManagerMixin` for details - # on the `.acquire()`/`.release()` sequencing.. - async with self._send_lock: - - # NOTE: lookup the `trio.Task.context`'s var for - # the current `MsgCodec`. - codec: MsgCodec = _ctxvar_MsgCodec.get() - - # XXX for ctxvar debug only! - # if self._codec.pld_spec != codec.pld_spec: - # self._codec = codec - # log.runtime( - # f'Using new codec in {self}.send()\n' - # f'codec: {self._codec}\n\n' - # f'msg: {msg}\n' - # ) - - if type(msg) not in msgtypes.__msg_types__: - if strict_types: - raise _mk_send_mte( - msg, - codec=codec, - ) - else: - log.warning( - 'Sending non-`Msg`-spec msg?\n\n' - f'{msg}\n' - ) - - try: - bytes_data: bytes = codec.encode(msg) - except TypeError as _err: - typerr = _err - msgtyperr: MsgTypeError = _mk_send_mte( - msg, - codec=codec, - message=( - f'IPC-msg-spec violation in\n\n' - f'{pretty_struct.Struct.pformat(msg)}' - ), - src_type_error=typerr, - ) - raise msgtyperr from typerr - - # supposedly the fastest says, - # https://stackoverflow.com/a/54027962 - size: bytes = struct.pack(" - # except BaseException as _err: - # err = _err - # if not isinstance(err, MsgTypeError): - # __tracebackhide__: bool = False - # raise - - @property - def laddr(self) -> tuple[str, int]: - return self._laddr - - @property - def raddr(self) -> tuple[str, int]: - return self._raddr - - async def recv(self) -> Any: - return await self._aiter_pkts.asend(None) - - async def drain(self) -> AsyncIterator[dict]: - ''' - Drain the stream's remaining messages sent from - the far end until the connection is closed by - the peer. - - ''' - try: - async for msg in self._iter_packets(): - self.drained.append(msg) - except TransportClosed: - for msg in self.drained: - yield msg - - def __aiter__(self): - return self._aiter_pkts - - def connected(self) -> bool: - return self.stream.socket.fileno() != -1 - - -def get_msg_transport( - - key: tuple[str, str], - -) -> Type[MsgTransport]: - - return { - ('msgpack', 'tcp'): MsgpackTCPStream, - }[key] - - -class Channel: - ''' - An inter-process channel for communication between (remote) actors. - - Wraps a ``MsgStream``: transport + encoding IPC connection. - - Currently we only support ``trio.SocketStream`` for transport - (aka TCP) and the ``msgpack`` interchange format via the ``msgspec`` - codec libary. - - ''' - def __init__( - - self, - destaddr: tuple[str, int]|None, - - msg_transport_type_key: tuple[str, str] = ('msgpack', 'tcp'), - - # TODO: optional reconnection support? 
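# NOTE: the deleted `MsgpackTCPStream` above frames every msg as a fixed-size
# length header followed by the codec-encoded payload (`prefix_size: int = 4`
# and the `receive_exactly(4)` read). The exact `struct` format char is not
# visible in the hunk (truncated text), so the little-endian '<I' below is an
# assumption for illustration only; in the real transport the payload bytes
# come from the task's `MsgCodec.encode()`.
import struct


def frame(payload: bytes) -> bytes:
    # prepend a 4-byte size header to one encoded msg
    return struct.pack('<I', len(payload)) + payload


def unframe(buf: bytes) -> tuple[bytes, bytes]:
    # split one framed msg off the front of `buf`,
    # returning (payload, remaining-bytes)
    size, = struct.unpack('<I', buf[:4])
    return buf[4:4 + size], buf[4 + size:]


wire: bytes = frame(b'msg-1') + frame(b'msg-2')
msg, rest = unframe(wire)
assert msg == b'msg-1'
assert unframe(rest)[0] == b'msg-2'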
- # auto_reconnect: bool = False, - # on_reconnect: typing.Callable[..., typing.Awaitable] = None, - - ) -> None: - - # self._recon_seq = on_reconnect - # self._autorecon = auto_reconnect - - self._destaddr = destaddr - self._transport_key = msg_transport_type_key - - # Either created in ``.connect()`` or passed in by - # user in ``.from_stream()``. - self._stream: trio.SocketStream|None = None - self._transport: MsgTransport|None = None - - # set after handshake - always uid of far end - self.uid: tuple[str, str]|None = None - - self._aiter_msgs = self._iter_msgs() - self._exc: Exception|None = None # set if far end actor errors - self._closed: bool = False - - # flag set by ``Portal.cancel_actor()`` indicating remote - # (possibly peer) cancellation of the far end actor - # runtime. - self._cancel_called: bool = False - - @property - def msgstream(self) -> MsgTransport: - log.info( - '`Channel.msgstream` is an old name, use `._transport`' - ) - return self._transport - - @property - def transport(self) -> MsgTransport: - return self._transport - - @classmethod - def from_stream( - cls, - stream: trio.SocketStream, - **kwargs, - - ) -> Channel: - - src, dst = get_stream_addrs(stream) - chan = Channel( - destaddr=dst, - **kwargs, - ) - - # set immediately here from provided instance - chan._stream: trio.SocketStream = stream - chan.set_msg_transport(stream) - return chan - - def set_msg_transport( - self, - stream: trio.SocketStream, - type_key: tuple[str, str]|None = None, - - # XXX optionally provided codec pair for `msgspec`: - # https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types - codec: MsgCodec|None = None, - - ) -> MsgTransport: - type_key = ( - type_key - or - self._transport_key - ) - # get transport type, then - self._transport = get_msg_transport( - type_key - # instantiate an instance of the msg-transport - )( - stream, - codec=codec, - ) - return self._transport - - @cm - def apply_codec( - self, - codec: MsgCodec, - - ) -> None: - ''' - Temporarily override the underlying IPC msg codec for - dynamic enforcement of messaging schema. - - ''' - orig: MsgCodec = self._transport.codec - try: - self._transport.codec = codec - yield - finally: - self._transport.codec = orig - - # TODO: do a .src/.dst: str for maddrs? - def __repr__(self) -> str: - if not self._transport: - return '' - - return repr( - self._transport.stream.socket._sock - ).replace( # type: ignore - "socket.socket", - "Channel", - ) - - @property - def laddr(self) -> tuple[str, int]|None: - return self._transport.laddr if self._transport else None - - @property - def raddr(self) -> tuple[str, int]|None: - return self._transport.raddr if self._transport else None - - async def connect( - self, - destaddr: tuple[Any, ...] | None = None, - **kwargs - - ) -> MsgTransport: - - if self.connected(): - raise RuntimeError("channel is already connected?") - - destaddr = destaddr or self._destaddr - assert isinstance(destaddr, tuple) - - stream = await trio.open_tcp_stream( - *destaddr, - **kwargs - ) - transport = self.set_msg_transport(stream) - - log.transport( - f'Opened channel[{type(transport)}]: {self.laddr} -> {self.raddr}' - ) - return transport - - # TODO: something like, - # `pdbp.hideframe_on(errors=[MsgTypeError])` - # instead of the `try/except` hack we have rn.. - # seems like a pretty useful thing to have in general - # along with being able to filter certain stack frame(s / sets) - # possibly based on the current log-level? 
- async def send( - self, - payload: Any, - - hide_tb: bool = False, - - ) -> None: - ''' - Send a coded msg-blob over the transport. - - ''' - __tracebackhide__: bool = hide_tb - try: - log.transport( - '=> send IPC msg:\n\n' - f'{pformat(payload)}\n' - ) - # assert self._transport # but why typing? - await self._transport.send( - payload, - hide_tb=hide_tb, - ) - except BaseException as _err: - err = _err # bind for introspection - if not isinstance(_err, MsgTypeError): - # assert err - __tracebackhide__: bool = False - else: - assert err.cid - - raise - - async def recv(self) -> Any: - assert self._transport - return await self._transport.recv() - - # TODO: auto-reconnect features like 0mq/nanomsg? - # -[ ] implement it manually with nods to SC prot - # possibly on multiple transport backends? - # -> seems like that might be re-inventing scalability - # prots tho no? - # try: - # return await self._transport.recv() - # except trio.BrokenResourceError: - # if self._autorecon: - # await self._reconnect() - # return await self.recv() - # raise - - async def aclose(self) -> None: - - log.transport( - f'Closing channel to {self.uid} ' - f'{self.laddr} -> {self.raddr}' - ) - assert self._transport - await self._transport.stream.aclose() - self._closed = True - - async def __aenter__(self): - await self.connect() - return self - - async def __aexit__(self, *args): - await self.aclose(*args) - - def __aiter__(self): - return self._aiter_msgs - - # ?TODO? run any reconnection sequence? - # -[ ] prolly should be impl-ed as deco-API? - # - # async def _reconnect(self) -> None: - # """Handle connection failures by polling until a reconnect can be - # established. - # """ - # down = False - # while True: - # try: - # with trio.move_on_after(3) as cancel_scope: - # await self.connect() - # cancelled = cancel_scope.cancelled_caught - # if cancelled: - # log.transport( - # "Reconnect timed out after 3 seconds, retrying...") - # continue - # else: - # log.transport("Stream connection re-established!") - - # # on_recon = self._recon_seq - # # if on_recon: - # # await on_recon(self) - - # break - # except (OSError, ConnectionRefusedError): - # if not down: - # down = True - # log.transport( - # f"Connection to {self.raddr} went down, waiting" - # " for re-establishment") - # await trio.sleep(1) - - async def _iter_msgs( - self - ) -> AsyncGenerator[Any, None]: - ''' - Yield `MsgType` IPC msgs decoded and deliverd from - an underlying `MsgTransport` protocol. - - This is a streaming routine alo implemented as an async-gen - func (same a `MsgTransport._iter_pkts()`) gets allocated by - a `.__call__()` inside `.__init__()` where it is assigned to - the `._aiter_msgs` attr. - - ''' - assert self._transport - while True: - try: - async for msg in self._transport: - match msg: - # NOTE: if transport/interchange delivers - # a type error, we pack it with the far - # end peer `Actor.uid` and relay the - # `Error`-msg upward to the `._rpc` stack - # for normal RAE handling. 
- case MsgTypeError(): - yield pack_from_raise( - local_err=msg, - cid=msg.cid, - - # XXX we pack it here bc lower - # layers have no notion of an - # actor-id ;) - src_uid=self.uid, - ) - case _: - yield msg - - except trio.BrokenResourceError: - - # if not self._autorecon: - raise - - await self.aclose() - - # if self._autorecon: # attempt reconnect - # await self._reconnect() - # continue - - def connected(self) -> bool: - return self._transport.connected() if self._transport else False - - -@acm -async def _connect_chan( - host: str, - port: int - -) -> typing.AsyncGenerator[Channel, None]: - ''' - Create and connect a channel with disconnect on context manager - teardown. - - ''' - chan = Channel((host, port)) - await chan.connect() - yield chan - with trio.CancelScope(shield=True): - await chan.aclose() diff --git a/tractor/_portal.py b/tractor/_portal.py index cee10c47..69133528 100644 --- a/tractor/_portal.py +++ b/tractor/_portal.py @@ -39,11 +39,14 @@ import warnings import trio -from .trionics import maybe_open_nursery +from .trionics import ( + maybe_open_nursery, + collapse_eg, +) from ._state import ( current_actor, ) -from ._ipc import Channel +from .ipc import Channel from .log import get_logger from .msg import ( # Error, @@ -52,8 +55,8 @@ from .msg import ( Return, ) from ._exceptions import ( - # unpack_error, NoResult, + TransportClosed, ) from ._context import ( Context, @@ -107,10 +110,18 @@ class Portal: # point. self._expect_result_ctx: Context|None = None self._streams: set[MsgStream] = set() + + # TODO, this should be PRIVATE (and never used publicly)! since it's just + # a cached ref to the local runtime instead of calling + # `current_actor()` everywhere.. XD self.actor: Actor = current_actor() @property def chan(self) -> Channel: + ''' + Ref to this ctx's underlying `tractor.ipc.Channel`. + + ''' return self._chan @property @@ -170,10 +181,17 @@ class Portal: # not expecting a "main" result if self._expect_result_ctx is None: + peer_id: str = f'{self.channel.aid.reprol()!r}' log.warning( - f"Portal for {self.channel.uid} not expecting a final" - " result?\nresult() should only be called if subactor" - " was spawned with `ActorNursery.run_in_actor()`") + f'Portal to peer {peer_id} will not deliver a final result?\n' + f'\n' + f'Context.result() can only be called by the parent of ' + f'a sub-actor when it was spawned with ' + f'`ActorNursery.run_in_actor()`' + f'\n' + f'Further this `ActorNursery`-method-API will deprecated in the' + f'near fututre!\n' + ) return NoResult # expecting a "main" result @@ -206,6 +224,7 @@ class Portal: typname: str = type(self).__name__ log.warning( f'`{typname}.result()` is DEPRECATED!\n' + f'\n' f'Use `{typname}.wait_for_result()` instead!\n' ) return await self.wait_for_result( @@ -217,8 +236,10 @@ class Portal: # terminate all locally running async generator # IPC calls if self._streams: - log.cancel( - f"Cancelling all streams with {self.channel.uid}") + peer_id: str = f'{self.channel.aid.reprol()!r}' + report: str = ( + f'Cancelling all msg-streams with {peer_id}\n' + ) for stream in self._streams.copy(): try: await stream.aclose() @@ -227,10 +248,18 @@ class Portal: # (unless of course at some point down the road we # won't expect this to always be the case or need to # detect it for respawning purposes?) 
- log.debug(f"{stream} was already closed.") + report += ( + f'->) {stream!r} already closed\n' + ) + + log.cancel(report) async def aclose(self): - log.debug(f"Closing {self}") + log.debug( + f'Closing portal\n' + f'>}}\n' + f'|_{self}\n' + ) # TODO: once we move to implementing our own `ReceiveChannel` # (including remote task cancellation inside its `.aclose()`) # we'll need to .aclose all those channels here @@ -256,23 +285,22 @@ class Portal: __runtimeframe__: int = 1 # noqa chan: Channel = self.channel + peer_id: str = f'{self.channel.aid.reprol()!r}' if not chan.connected(): log.runtime( - 'This channel is already closed, skipping cancel request..' + 'Peer {peer_id} is already disconnected\n' + '-> skipping cancel request..\n' ) return False - reminfo: str = ( - f'c)=> {self.channel.uid}\n' - f' |_{chan}\n' - ) log.cancel( - f'Requesting actor-runtime cancel for peer\n\n' - f'{reminfo}' + f'Sending actor-runtime-cancel-req to peer\n' + f'\n' + f'c)=> {peer_id}\n' ) # XXX the one spot we set it? - self.channel._cancel_called: bool = True + chan._cancel_called: bool = True try: # send cancel cmd - might not get response # XXX: sure would be nice to make this work with @@ -293,22 +321,43 @@ class Portal: # may timeout and we never get an ack (obvi racy) # but that doesn't mean it wasn't cancelled. log.debug( - 'May have failed to cancel peer?\n' - f'{reminfo}' + f'May have failed to cancel peer?\n' + f'\n' + f'c)=?> {peer_id}\n' ) # if we get here some weird cancellation case happened return False except ( + # XXX, should never really get raised unless we aren't + # wrapping them in the below type by mistake? + # + # Leaving the catch here for now until we're very sure + # all the cases (for various tpt protos) have indeed been + # re-wrapped ;p trio.ClosedResourceError, trio.BrokenResourceError, - ): - log.debug( - 'IPC chan for actor already closed or broken?\n\n' - f'{self.channel.uid}\n' - f' |_{self.channel}\n' + + TransportClosed, + ) as tpt_err: + ipc_borked_report: str = ( + f'IPC for actor already closed/broken?\n\n' + f'\n' + f'c)=x> {peer_id}\n' ) + match tpt_err: + case TransportClosed(): + log.debug(ipc_borked_report) + case _: + ipc_borked_report += ( + f'\n' + f'Unhandled low-level transport-closed/error during\n' + f'Portal.cancel_actor()` request?\n' + f'<{type(tpt_err).__name__}( {tpt_err} )>\n' + ) + log.warning(ipc_borked_report) + return False # TODO: do we still need this for low level `Actor`-runtime @@ -464,10 +513,13 @@ class Portal: with trio.CancelScope(shield=True): await ctx.cancel() - except trio.ClosedResourceError: + except trio.ClosedResourceError as cre: # if the far end terminates before we send a cancel the # underlying transport-channel may already be closed. - log.cancel(f'Context {ctx} was already closed?') + log.cancel( + f'Context.cancel() -> {cre!r}\n' + f'cid: {ctx.cid!r} already closed?\n' + ) # XXX: should this always be done? # await recv_chan.aclose() @@ -504,8 +556,12 @@ class LocalPortal: return it's result. ''' - obj = self.actor if ns == 'self' else importlib.import_module(ns) - func = getattr(obj, func_name) + obj = ( + self.actor + if ns == 'self' + else importlib.import_module(ns) + ) + func: Callable = getattr(obj, func_name) return await func(**kwargs) @@ -530,30 +586,30 @@ async def open_portal( assert actor was_connected: bool = False - async with maybe_open_nursery( - tn, - shield=shield, - strict_exception_groups=False, - # ^XXX^ TODO? soo roll our own then ?? 
- # -> since we kinda want the "if only one `.exception` then - # just raise that" interface? - ) as tn: + async with ( + collapse_eg(), + maybe_open_nursery( + tn, + shield=shield, + ) as tn, + ): if not channel.connected(): await channel.connect() was_connected = True - if channel.uid is None: - await actor._do_handshake(channel) + if channel.aid is None: + await channel._do_handshake( + aid=actor.aid, + ) msg_loop_cs: trio.CancelScope|None = None if start_msg_loop: - from ._runtime import process_messages + from . import _rpc msg_loop_cs = await tn.start( partial( - process_messages, - actor, - channel, + _rpc.process_messages, + chan=channel, # if the local task is cancelled we want to keep # the msg loop running until our block ends shield=True, diff --git a/tractor/_root.py b/tractor/_root.py index 2a9beaa3..d7b3359b 100644 --- a/tractor/_root.py +++ b/tractor/_root.py @@ -18,7 +18,9 @@ Root actor runtime ignition(s). ''' -from contextlib import asynccontextmanager as acm +from contextlib import ( + asynccontextmanager as acm, +) from functools import partial import importlib import inspect @@ -26,96 +28,55 @@ import logging import os import signal import sys -from typing import Callable +from typing import ( + Any, + Callable, +) import warnings import trio -from ._runtime import ( - Actor, - Arbiter, - # TODO: rename and make a non-actor subtype? - # Arbiter as Registry, - async_main, +from . import _runtime +from .devx import ( + debug, + _frame_stack, + pformat as _pformat, ) -from .devx import _debug from . import _spawn from . import _state from . import log -from ._ipc import _connect_chan -from ._exceptions import is_multi_cancelled - - -# set at startup and after forks -_default_host: str = '127.0.0.1' -_default_port: int = 1616 - -# default registry always on localhost -_default_lo_addrs: list[tuple[str, int]] = [( - _default_host, - _default_port, -)] +from .ipc import ( + _connect_chan, +) +from ._addr import ( + Address, + UnwrappedAddress, + default_lo_addrs, + mk_uuid, + wrap_address, +) +from .trionics import ( + is_multi_cancelled, + collapse_eg, +) +from ._exceptions import ( + RuntimeFailure, +) logger = log.get_logger('tractor') +# TODO: stick this in a `@acm` defined in `devx.debug`? +# -[ ] also maybe consider making this a `wrapt`-deco to +# save an indent level? +# @acm -async def open_root_actor( - - *, - # defaults are above - registry_addrs: list[tuple[str, int]]|None = None, - - # defaults are above - arbiter_addr: tuple[str, int]|None = None, - - name: str|None = 'root', - - # either the `multiprocessing` start method: - # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods - # OR `trio` (the new default). - start_method: _spawn.SpawnMethodKey|None = None, - - # enables the multi-process debugger support - debug_mode: bool = False, - maybe_enable_greenback: bool = True, # `.pause_from_sync()/breakpoint()` support - enable_stack_on_sig: bool = False, - - # internal logging - loglevel: str|None = None, - - enable_modules: list|None = None, - rpc_module_paths: list|None = None, - - # NOTE: allow caller to ensure that only one registry exists - # and that this call creates it. - ensure_registry: bool = False, - - hide_tb: bool = True, - - # XXX, proxied directly to `.devx._debug._maybe_enter_pm()` - # for REPL-entry logic. - debug_filter: Callable[ - [BaseException|BaseExceptionGroup], - bool, - ] = lambda err: not is_multi_cancelled(err), - - # TODO, a way for actors to augment passing derived - # read-only state to sublayers? 
- # extra_rt_vars: dict|None = None, - -) -> Actor: - ''' - Runtime init entry point for ``tractor``. - - ''' - _debug.hide_runtime_frames() - __tracebackhide__: bool = hide_tb - - # TODO: stick this in a `@cm` defined in `devx._debug`? - # +async def maybe_block_bp( + debug_mode: bool, + maybe_enable_greenback: bool, +) -> bool: # Override the global debugger hook to make it play nice with # ``trio``, see much discussion in: # https://github.com/python-trio/trio/issues/1155#issuecomment-742964018 @@ -124,23 +85,25 @@ async def open_root_actor( 'PYTHONBREAKPOINT', None, ) + bp_blocked: bool if ( debug_mode and maybe_enable_greenback and ( - maybe_mod := await _debug.maybe_init_greenback( + maybe_mod := await debug.maybe_init_greenback( raise_not_found=False, ) ) ): logger.info( f'Found `greenback` installed @ {maybe_mod}\n' - 'Enabling `tractor.pause_from_sync()` support!\n' + f'Enabling `tractor.pause_from_sync()` support!\n' ) os.environ['PYTHONBREAKPOINT'] = ( - 'tractor.devx._debug._sync_pause_from_builtin' + 'tractor.devx.debug._sync_pause_from_builtin' ) _state._runtime_vars['use_greenback'] = True + bp_blocked = False else: # TODO: disable `breakpoint()` by default (without @@ -159,302 +122,481 @@ async def open_root_actor( # lol ok, # https://docs.python.org/3/library/sys.html#sys.breakpointhook os.environ['PYTHONBREAKPOINT'] = "0" + bp_blocked = True - # attempt to retreive ``trio``'s sigint handler and stash it - # on our debugger lock state. - _debug.DebugStatus._trio_handler = signal.getsignal(signal.SIGINT) - - # mark top most level process as root actor - _state._runtime_vars['_is_root'] = True - - # caps based rpc list - enable_modules = ( - enable_modules - or - [] - ) - - if rpc_module_paths: - warnings.warn( - "`rpc_module_paths` is now deprecated, use " - " `enable_modules` instead.", - DeprecationWarning, - stacklevel=2, - ) - enable_modules.extend(rpc_module_paths) - - if start_method is not None: - _spawn.try_set_start_method(start_method) - - if arbiter_addr is not None: - warnings.warn( - '`arbiter_addr` is now deprecated\n' - 'Use `registry_addrs: list[tuple]` instead..', - DeprecationWarning, - stacklevel=2, - ) - registry_addrs = [arbiter_addr] - - registry_addrs: list[tuple[str, int]] = ( - registry_addrs - or - _default_lo_addrs - ) - assert registry_addrs - - loglevel = ( - loglevel - or log._default_loglevel - ).upper() - - if ( - debug_mode - and _spawn._spawn_method == 'trio' - ): - _state._runtime_vars['_debug_mode'] = True - - # expose internal debug module to every actor allowing for - # use of ``await tractor.pause()`` - enable_modules.append('tractor.devx._debug') - - # if debug mode get's enabled *at least* use that level of - # logging for some informative console prompts. - if ( - logging.getLevelName( - # lul, need the upper case for the -> int map? - # sweet "dynamic function behaviour" stdlib... - loglevel, - ) > logging.getLevelName('PDB') - ): - loglevel = 'PDB' - - - elif debug_mode: - raise RuntimeError( - "Debug mode is only supported for the `trio` backend!" - ) - - assert loglevel - _log = log.get_console_log(loglevel) - assert _log - - # TODO: factor this into `.devx._stackscope`!! 
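# NOTE: the new `maybe_block_bp()` acm above saves the prior
# `PYTHONBREAKPOINT`/`sys.breakpointhook` state and (per the `finally:` block
# further below) restores it on exit. The save/restore shape in isolation,
# stdlib-only:
from contextlib import contextmanager
import os
import sys


@contextmanager
def override_breakpoint_hook(target: str):
    prior_hook = sys.breakpointhook
    prior_env: str|None = os.environ.get('PYTHONBREAKPOINT', None)
    os.environ['PYTHONBREAKPOINT'] = target  # eg. '0' disables `breakpoint()`
    try:
        yield
    finally:
        # restore any prior built-in `breakpoint()` hook state
        sys.breakpointhook = prior_hook
        if prior_env is not None:
            os.environ['PYTHONBREAKPOINT'] = prior_env
        else:
            os.environ.pop('PYTHONBREAKPOINT', None)


# eg. silence `breakpoint()` inside some block,
with override_breakpoint_hook('0'):
    breakpoint()  # no-op since the default hook sees PYTHONBREAKPOINT=0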
- if ( - debug_mode - and - enable_stack_on_sig - ): - from .devx._stackscope import enable_stack_on_sig - enable_stack_on_sig() - - # closed into below ping task-func - ponged_addrs: list[tuple[str, int]] = [] - - async def ping_tpt_socket( - addr: tuple[str, int], - timeout: float = 1, - ) -> None: - ''' - Attempt temporary connection to see if a registry is - listening at the requested address by a tranport layer - ping. - - If a connection can't be made quickly we assume none no - server is listening at that addr. - - ''' - try: - # TODO: this connect-and-bail forces us to have to - # carefully rewrap TCP 104-connection-reset errors as - # EOF so as to avoid propagating cancel-causing errors - # to the channel-msg loop machinery. Likely it would - # be better to eventually have a "discovery" protocol - # with basic handshake instead? - with trio.move_on_after(timeout): - async with _connect_chan(*addr): - ponged_addrs.append(addr) - - except OSError: - # TODO: make this a "discovery" log level? - logger.info( - f'No actor registry found @ {addr}\n' - ) - - async with trio.open_nursery() as tn: - for addr in registry_addrs: - tn.start_soon( - ping_tpt_socket, - tuple(addr), # TODO: just drop this requirement? - ) - - trans_bind_addrs: list[tuple[str, int]] = [] - - # Create a new local root-actor instance which IS NOT THE - # REGISTRAR - if ponged_addrs: - if ensure_registry: - raise RuntimeError( - f'Failed to open `{name}`@{ponged_addrs}: ' - 'registry socket(s) already bound' - ) - - # we were able to connect to an arbiter - logger.info( - f'Registry(s) seem(s) to exist @ {ponged_addrs}' - ) - - actor = Actor( - name=name or 'anonymous', - registry_addrs=ponged_addrs, - loglevel=loglevel, - enable_modules=enable_modules, - ) - # DO NOT use the registry_addrs as the transport server - # addrs for this new non-registar, root-actor. - for host, port in ponged_addrs: - # NOTE: zero triggers dynamic OS port allocation - trans_bind_addrs.append((host, 0)) - - # Start this local actor as the "registrar", aka a regular - # actor who manages the local registry of "mailboxes" of - # other process-tree-local sub-actors. - else: - - # NOTE that if the current actor IS THE REGISTAR, the - # following init steps are taken: - # - the tranport layer server is bound to each (host, port) - # pair defined in provided registry_addrs, or the default. - trans_bind_addrs = registry_addrs - - # - it is normally desirable for any registrar to stay up - # indefinitely until either all registered (child/sub) - # actors are terminated (via SC supervision) or, - # a re-election process has taken place. - # NOTE: all of ^ which is not implemented yet - see: - # https://github.com/goodboy/tractor/issues/216 - # https://github.com/goodboy/tractor/pull/348 - # https://github.com/goodboy/tractor/issues/296 - - actor = Arbiter( - name or 'registrar', - registry_addrs=registry_addrs, - loglevel=loglevel, - enable_modules=enable_modules, - ) - # XXX, in case the root actor runtime was actually run from - # `tractor.to_asyncio.run_as_asyncio_guest()` and NOt - # `.trio.run()`. - actor._infected_aio = _state._runtime_vars['_is_infected_aio'] - - # Start up main task set via core actor-runtime nurseries. 
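
# ------ editor's sketch (illustrative, not part of the patch) ------
# The registrar-election flow being removed here (and re-written further
# below): concurrently probe every configured registry addr, then either
# join the existing registrar(s) or become the registrar yourself.
# `probe()` stands in for `ping_tpt_socket()`; names are illustrative.
import trio

async def choose_registrar_role(
    registry_addrs: list,
    probe,  # async fn: (addr, found: list) -> None
) -> str:
    found: list = []
    async with trio.open_nursery() as tn:
        for addr in registry_addrs:
            tn.start_soon(probe, addr, found)

    if found:
        # a registrar answered: bind this root actor to random ports
        # and simply register with the existing registrar(s).
        return 'plain-root'

    # nothing listening: this root actor doubles as the registrar and
    # binds the registry addrs itself.
    return 'registrar'
# ------ end sketch ------
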
try: - # assign process-local actor - _state._current_actor = actor - - # start local channel-server and fake the portal API - # NOTE: this won't block since we provide the nursery - ml_addrs_str: str = '\n'.join( - f'@{addr}' for addr in trans_bind_addrs - ) - logger.info( - f'Starting local {actor.uid} on the following transport addrs:\n' - f'{ml_addrs_str}' - ) - - # start the actor runtime in a new task - async with trio.open_nursery( - strict_exception_groups=False, - # ^XXX^ TODO? instead unpack any RAE as per "loose" style? - ) as nursery: - - # ``_runtime.async_main()`` creates an internal nursery - # and blocks here until any underlying actor(-process) - # tree has terminated thereby conducting so called - # "end-to-end" structured concurrency throughout an - # entire hierarchical python sub-process set; all - # "actor runtime" primitives are SC-compat and thus all - # transitively spawned actors/processes must be as - # well. - await nursery.start( - partial( - async_main, - actor, - accept_addrs=trans_bind_addrs, - parent_addr=None - ) - ) - try: - yield actor - except ( - Exception, - BaseExceptionGroup, - ) as err: - - # TODO, in beginning to handle the subsubactor with - # crashed grandparent cases.. - # - # was_locked: bool = await _debug.maybe_wait_for_debugger( - # child_in_debug=True, - # ) - # XXX NOTE XXX see equiv note inside - # `._runtime.Actor._stream_handler()` where in the - # non-root or root-that-opened-this-mahually case we - # wait for the local actor-nursery to exit before - # exiting the transport channel handler. - entered: bool = await _debug._maybe_enter_pm( - err, - api_frame=inspect.currentframe(), - debug_filter=debug_filter, - ) - - if ( - not entered - and - not is_multi_cancelled( - err, - ) - ): - logger.exception('Root actor crashed\n') - - # ALWAYS re-raise any error bubbled up from the - # runtime! - raise - - finally: - # NOTE: not sure if we'll ever need this but it's - # possibly better for even more determinism? - # logger.cancel( - # f'Waiting on {len(nurseries)} nurseries in root..') - # nurseries = actor._actoruid2nursery.values() - # async with trio.open_nursery() as tempn: - # for an in nurseries: - # tempn.start_soon(an.exited.wait) - - logger.info( - 'Closing down root actor' - ) - await actor.cancel(None) # self cancel + yield bp_blocked finally: - _state._current_actor = None - _state._last_actor_terminated = actor + # restore any prior built-in `breakpoint()` hook state + if builtin_bp_handler is not None: + sys.breakpointhook = builtin_bp_handler + + if orig_bp_path is not None: + os.environ['PYTHONBREAKPOINT'] = orig_bp_path + + else: + # clear env back to having no entry + os.environ.pop('PYTHONBREAKPOINT', None) + + + +@acm +async def open_root_actor( + *, + # defaults are above + registry_addrs: list[UnwrappedAddress]|None = None, + + # defaults are above + arbiter_addr: tuple[UnwrappedAddress]|None = None, + + enable_transports: list[ + # TODO, this should eventually be the pairs as + # defined by (codec, proto) as on `MsgTransport. + _state.TransportProtocolKey, + ]|None = None, + + name: str|None = 'root', + + # either the `multiprocessing` start method: + # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods + # OR `trio` (the new default). 
+ start_method: _spawn.SpawnMethodKey|None = None, + + # enables the multi-process debugger support + debug_mode: bool = False, + maybe_enable_greenback: bool = False, # `.pause_from_sync()/breakpoint()` support + # ^XXX NOTE^ the perf implications of use, + # https://greenback.readthedocs.io/en/latest/principle.html#performance + enable_stack_on_sig: bool = False, + + # internal logging + loglevel: str|None = None, + + enable_modules: list|None = None, + rpc_module_paths: list|None = None, + + # NOTE: allow caller to ensure that only one registry exists + # and that this call creates it. + ensure_registry: bool = False, + + hide_tb: bool = True, + + # XXX, proxied directly to `.devx.debug._maybe_enter_pm()` + # for REPL-entry logic. + debug_filter: Callable[ + [BaseException|BaseExceptionGroup], + bool, + ] = lambda err: not is_multi_cancelled(err), + + # TODO, a way for actors to augment passing derived + # read-only state to sublayers? + # extra_rt_vars: dict|None = None, + +) -> _runtime.Actor: + ''' + Initialize the `tractor` runtime by starting a "root actor" in + a parent-most Python process. + + All (disjoint) actor-process-trees-as-programs are created via + this entrypoint. + + ''' + # XXX NEVER allow nested actor-trees! + if already_actor := _state.current_actor( + err_on_no_runtime=False, + ): + rtvs: dict[str, Any] = _state._runtime_vars + root_mailbox: list[str, int] = rtvs['_root_mailbox'] + registry_addrs: list[list[str, int]] = rtvs['_registry_addrs'] + raise RuntimeFailure( + f'A current actor already exists !?\n' + f'({already_actor}\n' + f'\n' + f'You can NOT open a second root actor from within ' + f'an existing tree and the current root of this ' + f'already exists !!\n' + f'\n' + f'_root_mailbox: {root_mailbox!r}\n' + f'_registry_addrs: {registry_addrs!r}\n' + ) + + async with maybe_block_bp( + debug_mode=debug_mode, + maybe_enable_greenback=maybe_enable_greenback, + ): + if enable_transports is None: + enable_transports: list[str] = _state.current_ipc_protos() + else: + _state._runtime_vars['_enable_tpts'] = enable_transports + + # TODO! support multi-tpts per actor! + # Bo + if not len(enable_transports) == 1: + raise RuntimeError( + f'No multi-tpt support yet!\n' + f'enable_transports={enable_transports!r}\n' + ) + + _frame_stack.hide_runtime_frames() + __tracebackhide__: bool = hide_tb + + # attempt to retreive ``trio``'s sigint handler and stash it + # on our debugger lock state. + debug.DebugStatus._trio_handler = signal.getsignal(signal.SIGINT) + + # mark top most level process as root actor + _state._runtime_vars['_is_root'] = True + + # caps based rpc list + enable_modules = ( + enable_modules + or + [] + ) + + if rpc_module_paths: + warnings.warn( + "`rpc_module_paths` is now deprecated, use " + " `enable_modules` instead.", + DeprecationWarning, + stacklevel=2, + ) + enable_modules.extend(rpc_module_paths) + + if start_method is not None: + _spawn.try_set_start_method(start_method) + + # TODO! remove this ASAP! 
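
# ------ editor's usage sketch (illustrative, not part of the patch) ------
# Entering the runtime through the `@acm`-style `open_root_actor()`
# defined above; assumes the usual top-level re-export as
# `tractor.open_root_actor` and uses only kwargs from the new signature.
import trio
import tractor

async def main():
    async with tractor.open_root_actor(
        name='demo_root',
        loglevel='warning',
        debug_mode=False,
    ) as actor:
        # runtime is up; per the guard above, opening a *second* root
        # from inside this tree now raises `RuntimeFailure`.
        print(actor.aid)

if __name__ == '__main__':
    trio.run(main)
# ------ end sketch ------
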
+ if arbiter_addr is not None: + warnings.warn( + '`arbiter_addr` is now deprecated\n' + 'Use `registry_addrs: list[tuple]` instead..', + DeprecationWarning, + stacklevel=2, + ) + uw_reg_addrs = [arbiter_addr] + + uw_reg_addrs = registry_addrs + if not uw_reg_addrs: + uw_reg_addrs: list[UnwrappedAddress] = default_lo_addrs( + enable_transports + ) + + # must exist by now since all below code is dependent + assert uw_reg_addrs + registry_addrs: list[Address] = [ + wrap_address(uw_addr) + for uw_addr in uw_reg_addrs + ] + + loglevel = ( + loglevel + or log._default_loglevel + ).upper() - # restore built-in `breakpoint()` hook state if ( debug_mode and - maybe_enable_greenback + _spawn._spawn_method == 'trio' ): - if builtin_bp_handler is not None: - sys.breakpointhook = builtin_bp_handler + _state._runtime_vars['_debug_mode'] = True - if orig_bp_path is not None: - os.environ['PYTHONBREAKPOINT'] = orig_bp_path + # expose internal debug module to every actor allowing for + # use of ``await tractor.pause()`` + enable_modules.append('tractor.devx.debug._tty_lock') - else: - # clear env back to having no entry - os.environ.pop('PYTHONBREAKPOINT', None) + # if debug mode get's enabled *at least* use that level of + # logging for some informative console prompts. + if ( + logging.getLevelName( + # lul, need the upper case for the -> int map? + # sweet "dynamic function behaviour" stdlib... + loglevel, + ) > logging.getLevelName('PDB') + ): + loglevel = 'PDB' - logger.runtime("Root actor terminated") + + elif debug_mode: + raise RuntimeError( + "Debug mode is only supported for the `trio` backend!" + ) + + assert loglevel + _log = log.get_console_log(loglevel) + assert _log + + # TODO: factor this into `.devx._stackscope`!! + if ( + debug_mode + and + enable_stack_on_sig + ): + from .devx._stackscope import enable_stack_on_sig + enable_stack_on_sig() + + # closed into below ping task-func + ponged_addrs: list[Address] = [] + + async def ping_tpt_socket( + addr: Address, + timeout: float = 1, + ) -> None: + ''' + Attempt temporary connection to see if a registry is + listening at the requested address by a tranport layer + ping. + + If a connection can't be made quickly we assume none no + server is listening at that addr. + + ''' + try: + # TODO: this connect-and-bail forces us to have to + # carefully rewrap TCP 104-connection-reset errors as + # EOF so as to avoid propagating cancel-causing errors + # to the channel-msg loop machinery. Likely it would + # be better to eventually have a "discovery" protocol + # with basic handshake instead? + with trio.move_on_after(timeout): + async with _connect_chan(addr.unwrap()): + ponged_addrs.append(addr) + + except OSError: + # ?TODO, make this a "discovery" log level? + logger.info( + f'No root-actor registry found @ {addr!r}\n' + ) + + # !TODO, this is basically just another (abstract) + # happy-eyeballs, so we should try for formalize it somewhere + # in a `.[_]discovery` ya? 
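
# ------ editor's sketch (illustrative, not part of the patch) ------
# A standalone, TCP-only rendition of the connect-and-bail "registry
# ping" above: attempt a connection within a short window and treat
# refusal or timeout as "no registrar bound at that addr".
import trio

async def addr_has_listener(
    host: str,
    port: int,
    timeout: float = 1.0,
) -> bool:
    try:
        with trio.move_on_after(timeout) as cs:
            stream = await trio.open_tcp_stream(host, port)
            await stream.aclose()
        return not cs.cancelled_caught
    except OSError:
        # ECONNREFUSED/ECONNRESET etc. -> nothing listening
        return False
# ------ end sketch ------
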
+ # + async with trio.open_nursery() as tn: + for uw_addr in uw_reg_addrs: + addr: Address = wrap_address(uw_addr) + tn.start_soon( + ping_tpt_socket, + addr, + ) + + trans_bind_addrs: list[UnwrappedAddress] = [] + + # Create a new local root-actor instance which IS NOT THE + # REGISTRAR + if ponged_addrs: + if ensure_registry: + raise RuntimeError( + f'Failed to open `{name}`@{ponged_addrs}: ' + 'registry socket(s) already bound' + ) + + # we were able to connect to an arbiter + logger.info( + f'Registry(s) seem(s) to exist @ {ponged_addrs}' + ) + + actor = _runtime.Actor( + name=name or 'anonymous', + uuid=mk_uuid(), + registry_addrs=ponged_addrs, + loglevel=loglevel, + enable_modules=enable_modules, + ) + # **DO NOT** use the registry_addrs as the + # ipc-transport-server's bind-addrs as this is + # a new NON-registrar, ROOT-actor. + # + # XXX INSTEAD, bind random addrs using the same tpt + # proto. + for addr in ponged_addrs: + trans_bind_addrs.append( + addr.get_random( + bindspace=addr.bindspace, + ) + ) + + # Start this local actor as the "registrar", aka a regular + # actor who manages the local registry of "mailboxes" of + # other process-tree-local sub-actors. + else: + # NOTE that if the current actor IS THE REGISTAR, the + # following init steps are taken: + # - the tranport layer server is bound to each addr + # pair defined in provided registry_addrs, or the default. + trans_bind_addrs = uw_reg_addrs + + # - it is normally desirable for any registrar to stay up + # indefinitely until either all registered (child/sub) + # actors are terminated (via SC supervision) or, + # a re-election process has taken place. + # NOTE: all of ^ which is not implemented yet - see: + # https://github.com/goodboy/tractor/issues/216 + # https://github.com/goodboy/tractor/pull/348 + # https://github.com/goodboy/tractor/issues/296 + + # TODO: rename as `RootActor` or is that even necessary? + actor = _runtime.Arbiter( + name=name or 'registrar', + uuid=mk_uuid(), + registry_addrs=registry_addrs, + loglevel=loglevel, + enable_modules=enable_modules, + ) + # XXX, in case the root actor runtime was actually run from + # `tractor.to_asyncio.run_as_asyncio_guest()` and NOt + # `.trio.run()`. + actor._infected_aio = _state._runtime_vars['_is_infected_aio'] + + # NOTE, only set the loopback addr for the + # process-tree-global "root" mailbox since all sub-actors + # should be able to speak to their root actor over that + # channel. + raddrs: list[Address] = _state._runtime_vars['_root_addrs'] + raddrs.extend(trans_bind_addrs) + # TODO, remove once we have also removed all usage; + # eventually all (root-)registry apis should expect > 1 addr. + _state._runtime_vars['_root_mailbox'] = raddrs[0] + + # Start up main task set via core actor-runtime nurseries. + try: + # assign process-local actor + _state._current_actor = actor + + # start local channel-server and fake the portal API + # NOTE: this won't block since we provide the nursery + report: str = f'Starting actor-runtime for {actor.aid.reprol()!r}\n' + if reg_addrs := actor.registry_addrs: + report += ( + '-> Opening new registry @ ' + + + '\n'.join( + f'{addr}' for addr in reg_addrs + ) + ) + logger.info(f'{report}\n') + + # start runtime in a bg sub-task, yield to caller. + async with ( + collapse_eg(), + trio.open_nursery() as root_tn, + + # ?TODO? finally-footgun below? + # -> see note on why shielding. 
+ # maybe_raise_from_masking_exc(), + ): + actor._root_tn = root_tn + # `_runtime.async_main()` creates an internal nursery + # and blocks here until any underlying actor(-process) + # tree has terminated thereby conducting so called + # "end-to-end" structured concurrency throughout an + # entire hierarchical python sub-process set; all + # "actor runtime" primitives are SC-compat and thus all + # transitively spawned actors/processes must be as + # well. + await root_tn.start( + partial( + _runtime.async_main, + actor, + accept_addrs=trans_bind_addrs, + parent_addr=None + ) + ) + try: + yield actor + except ( + Exception, + BaseExceptionGroup, + ) as err: + + # TODO, in beginning to handle the subsubactor with + # crashed grandparent cases.. + # + # was_locked: bool = await debug.maybe_wait_for_debugger( + # child_in_debug=True, + # ) + # XXX NOTE XXX see equiv note inside + # `._runtime.Actor._stream_handler()` where in the + # non-root or root-that-opened-this-mahually case we + # wait for the local actor-nursery to exit before + # exiting the transport channel handler. + entered: bool = await debug._maybe_enter_pm( + err, + api_frame=inspect.currentframe(), + debug_filter=debug_filter, + + # XXX NOTE, required to debug root-actor + # crashes under cancellation conditions; so + # most of them! + shield=root_tn.cancel_scope.cancel_called, + ) + + if ( + not entered + and + not is_multi_cancelled( + err, + ) + ): + logger.exception( + 'Root actor crashed\n' + f'>x)\n' + f' |_{actor}\n' + ) + + # ALWAYS re-raise any error bubbled up from the + # runtime! + raise + + finally: + # NOTE/TODO?, not sure if we'll ever need this but it's + # possibly better for even more determinism? + # logger.cancel( + # f'Waiting on {len(nurseries)} nurseries in root..') + # nurseries = actor._actoruid2nursery.values() + # async with trio.open_nursery() as tempn: + # for an in nurseries: + # tempn.start_soon(an.exited.wait) + + op_nested_actor_repr: str = _pformat.nest_from_op( + input_op='>) ', + text=actor.pformat(), + nest_prefix='|_', + ) + logger.info( + f'Closing down root actor\n' + f'{op_nested_actor_repr}' + ) + # XXX, THIS IS A *finally-footgun*! + # (also mentioned in with-block above) + # -> though already shields iternally it can + # taskc here and mask underlying errors raised in + # the try-block above? 
+ with trio.CancelScope(shield=True): + await actor.cancel(None) # self cancel + finally: + # revert all process-global runtime state + if ( + debug_mode + and + _spawn._spawn_method == 'trio' + ): + _state._runtime_vars['_debug_mode'] = False + + _state._current_actor = None + _state._last_actor_terminated = actor + + sclang_repr: str = _pformat.nest_from_op( + input_op=')>', + text=actor.pformat(), + nest_prefix='|_', + nest_indent=1, + ) + + logger.info( + f'Root actor terminated\n' + f'{sclang_repr}' + ) def run_daemon( @@ -462,7 +604,7 @@ def run_daemon( # runtime kwargs name: str | None = 'root', - registry_addrs: list[tuple[str, int]] = _default_lo_addrs, + registry_addrs: list[UnwrappedAddress]|None = None, start_method: str | None = None, debug_mode: bool = False, diff --git a/tractor/_rpc.py b/tractor/_rpc.py index c5daed9e..573aa77b 100644 --- a/tractor/_rpc.py +++ b/tractor/_rpc.py @@ -37,12 +37,13 @@ import warnings import trio from trio import ( + Cancelled, CancelScope, Nursery, TaskStatus, ) -from ._ipc import Channel +from .ipc import Channel from ._context import ( Context, ) @@ -52,13 +53,18 @@ from ._exceptions import ( ModuleNotExposed, MsgTypeError, TransportClosed, - is_multi_cancelled, pack_error, unpack_error, ) +from .trionics import ( + collapse_eg, + is_multi_cancelled, + maybe_raise_from_masking_exc, +) from .devx import ( - _debug, + debug, add_div, + pformat as _pformat, ) from . import _state from .log import get_logger @@ -67,7 +73,7 @@ from .msg import ( MsgCodec, PayloadT, NamespacePath, - # pretty_struct, + pretty_struct, _ops as msgops, ) from tractor.msg.types import ( @@ -215,11 +221,18 @@ async def _invoke_non_context( task_status.started(ctx) result = await coro fname: str = func.__name__ + + op_nested_task: str = _pformat.nest_from_op( + input_op=f')> cid: {ctx.cid!r}', + text=f'{ctx._task}', + nest_indent=1, # under > + ) log.runtime( - 'RPC complete:\n' - f'task: {ctx._task}\n' - f'|_cid={ctx.cid}\n' - f'|_{fname}() -> {pformat(result)}\n' + f'RPC task complete\n' + f'\n' + f'{op_nested_task}\n' + f'\n' + f')> {fname}() -> {pformat(result)}\n' ) # NOTE: only send result if we know IPC isn't down @@ -250,7 +263,7 @@ async def _errors_relayed_via_ipc( ctx: Context, is_rpc: bool, - hide_tb: bool = False, + hide_tb: bool = True, debug_kbis: bool = False, task_status: TaskStatus[ Context | BaseException @@ -266,7 +279,7 @@ async def _errors_relayed_via_ipc( # TODO: a debug nursery when in debug mode! # async with maybe_open_debugger_nursery() as debug_tn: - # => see matching comment in side `._debug._pause()` + # => see matching comment in side `.debug._pause()` rpc_err: BaseException|None = None try: yield # run RPC invoke body @@ -318,7 +331,7 @@ async def _errors_relayed_via_ipc( 'RPC task crashed, attempting to enter debugger\n' f'|_{ctx}' ) - entered_debug = await _debug._maybe_enter_pm( + entered_debug = await debug._maybe_enter_pm( err, api_frame=inspect.currentframe(), ) @@ -371,13 +384,13 @@ async def _errors_relayed_via_ipc( # RPC task bookeeping. # since RPC tasks are scheduled inside a flat - # `Actor._service_n`, we add "handles" to each such that + # `Actor._service_tn`, we add "handles" to each such that # they can be individually ccancelled. 
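
# ------ editor's sketch (illustrative, not part of the patch) ------
# The `hide_tb` -> `__tracebackhide__` convention whose default flips to
# `True` above: a frame-local flag which pdb++/pytest-style tooling can
# read to skip runtime-internal frames when rendering user tracebacks.
def _runtime_internal(hide_tb: bool = True) -> None:
    __tracebackhide__: bool = hide_tb
    # an error raised past this frame can be elided from user-facing
    # tracebacks whenever the flag is set.
    raise RuntimeError('example internal failure')
# ------ end sketch ------
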
finally: - # if the error is not from user code and instead a failure - # of a runtime RPC or transport failure we do prolly want to - # show this frame + # if the error is not from user code and instead a failure of + # an internal-runtime-RPC or IPC-connection, we do (prolly) want + # to show this frame! if ( rpc_err and ( @@ -449,7 +462,7 @@ async def _invoke( connected IPC channel. This is the core "RPC" `trio.Task` scheduling machinery used to start every - remotely invoked function, normally in `Actor._service_n: Nursery`. + remotely invoked function, normally in `Actor._service_tn: Nursery`. ''' __tracebackhide__: bool = hide_tb @@ -462,7 +475,7 @@ async def _invoke( ): # XXX for .pause_from_sync()` usage we need to make sure # `greenback` is boostrapped in the subactor! - await _debug.maybe_init_greenback() + await debug.maybe_init_greenback() # TODO: possibly a specially formatted traceback # (not sure what typing is for this..)? @@ -616,32 +629,40 @@ async def _invoke( # -> the below scope is never exposed to the # `@context` marked RPC function. # - `._portal` is never set. + scope_err: BaseException|None = None try: - tn: trio.Nursery + # TODO: better `trionics` primitive/tooling usage here! + # -[ ] should would be nice to have our `TaskMngr` + # nursery here! + # -[ ] payload value checking like we do with + # `.started()` such that the debbuger can engage + # here in the child task instead of waiting for the + # parent to crash with it's own MTE.. + # + tn: Nursery rpc_ctx_cs: CancelScope async with ( - trio.open_nursery( - strict_exception_groups=False, - # ^XXX^ TODO? instead unpack any RAE as per "loose" style? - - ) as tn, + collapse_eg(hide_tb=False), + trio.open_nursery() as tn, msgops.maybe_limit_plds( ctx=ctx, spec=ctx_meta.get('pld_spec'), dec_hook=ctx_meta.get('dec_hook'), ), + + # XXX NOTE, this being the "most embedded" + # scope ensures unasking of the `await coro` below + # *should* never be interfered with!! + maybe_raise_from_masking_exc( + tn=tn, + unmask_from=Cancelled, + ) as _mbme, # maybe boxed masked exc ): ctx._scope_nursery = tn rpc_ctx_cs = ctx._scope = tn.cancel_scope task_status.started(ctx) - # TODO: better `trionics` tooling: - # -[ ] should would be nice to have our `TaskMngr` - # nursery here! - # -[ ] payload value checking like we do with - # `.started()` such that the debbuger can engage - # here in the child task instead of waiting for the - # parent to crash with it's own MTE.. + # invoke user endpoint fn. 
res: Any|PayloadT = await coro return_msg: Return|CancelAck = return_msg_type( cid=cid, @@ -651,7 +672,8 @@ async def _invoke( ctx._result = res log.runtime( f'Sending result msg and exiting {ctx.side!r}\n' - f'{return_msg}\n' + f'\n' + f'{pretty_struct.pformat(return_msg)}\n' ) await chan.send(return_msg) @@ -743,43 +765,52 @@ async def _invoke( BaseExceptionGroup, BaseException, trio.Cancelled, - - ) as scope_error: + ) as _scope_err: + scope_err = _scope_err if ( - isinstance(scope_error, RuntimeError) - and scope_error.args - and 'Cancel scope stack corrupted' in scope_error.args[0] + isinstance(scope_err, RuntimeError) + and + scope_err.args + and + 'Cancel scope stack corrupted' in scope_err.args[0] ): log.exception('Cancel scope stack corrupted!?\n') - # _debug.mk_pdb().set_trace() + # debug.mk_pdb().set_trace() # always set this (child) side's exception as the # local error on the context - ctx._local_error: BaseException = scope_error + ctx._local_error: BaseException = scope_err # ^-TODO-^ question, # does this matter other then for # consistentcy/testing? # |_ no user code should be in this scope at this point # AND we already set this in the block below? - # if a remote error was set then likely the - # exception group was raised due to that, so + # XXX if a remote error was set then likely the + # exc group was raised due to that, so # and we instead raise that error immediately! - ctx.maybe_raise() + maybe_re: ( + ContextCancelled|RemoteActorError + ) = ctx.maybe_raise() + if maybe_re: + log.cancel( + f'Suppressing remote-exc from peer,\n' + f'{maybe_re!r}\n' + ) # maybe TODO: pack in come kinda # `trio.Cancelled.__traceback__` here so they can be # unwrapped and displayed on the caller side? no se.. - raise + raise scope_err # `@context` entrypoint task bookeeping. # i.e. only pop the context tracking if used ;) finally: - assert chan.uid + assert chan.aid # don't pop the local context until we know the # associated child isn't in debug any more - await _debug.maybe_wait_for_debugger() + await debug.maybe_wait_for_debugger() ctx: Context = actor._contexts.pop(( chan.uid, cid, @@ -792,26 +823,49 @@ async def _invoke( f'after having {ctx.repr_state!r}\n' ) if merr: - logmeth: Callable = log.error - if isinstance(merr, ContextCancelled): - logmeth: Callable = log.runtime + if ( + # ctxc: by `Context.cancel()` + isinstance(merr, ContextCancelled) - if not isinstance(merr, RemoteActorError): - tb_str: str = ''.join(traceback.format_exception(merr)) + # out-of-layer cancellation, one of: + # - actorc: by `Portal.cancel_actor()` + # - OSc: by SIGINT or `Process.signal()` + or ( + isinstance(merr, trio.Cancelled) + and + ctx.canceller + ) + ): + logmeth: Callable = log.cancel + descr_str += ( + f' with {merr!r}\n' + ) + + elif ( + not isinstance(merr, RemoteActorError) + ): + tb_str: str = ''.join( + traceback.format_exception(merr) + ) descr_str += ( f'\n{merr!r}\n' # needed? 
f'{tb_str}\n' ) else: - descr_str += f'\n{merr!r}\n' + descr_str += ( + f'{merr!r}\n' + ) else: - descr_str += f'\nand final result {ctx.outcome!r}\n' + descr_str += ( + f'\n' + f'with final result {ctx.outcome!r}\n' + ) logmeth( - message - + - descr_str + f'{message}\n' + f'\n' + f'{descr_str}\n' ) @@ -869,7 +923,6 @@ async def try_ship_error_to_remote( async def process_messages( - actor: Actor, chan: Channel, shield: bool = False, task_status: TaskStatus[CancelScope] = trio.TASK_STATUS_IGNORED, @@ -883,7 +936,7 @@ async def process_messages( Receive (multiplexed) per-`Channel` RPC requests as msgs from remote processes; schedule target async funcs as local - `trio.Task`s inside the `Actor._service_n: Nursery`. + `trio.Task`s inside the `Actor._service_tn: Nursery`. Depending on msg type, non-`cmd` (task spawning/starting) request payloads (eg. `started`, `yield`, `return`, `error`) @@ -907,7 +960,8 @@ async def process_messages( (as utilized inside `Portal.cancel_actor()` ). ''' - assert actor._service_n # runtime state sanity + actor: Actor = _state.current_actor() + assert actor._service_tn # runtime state sanity # TODO: once `trio` get's an "obvious way" for req/resp we # should use it? @@ -978,12 +1032,10 @@ async def process_messages( cid=cid, kwargs=kwargs, ): - kwargs |= {'req_chan': chan} - # XXX NOTE XXX don't start entire actor # runtime cancellation if this actor is # currently in debug mode! - pdb_complete: trio.Event|None = _debug.DebugStatus.repl_release + pdb_complete: trio.Event|None = debug.DebugStatus.repl_release if pdb_complete: await pdb_complete.wait() @@ -998,14 +1050,14 @@ async def process_messages( cid, chan, actor.cancel, - kwargs, + kwargs | {'req_chan': chan}, is_rpc=False, return_msg_type=CancelAck, ) log.runtime( - 'Cancelling IPC transport msg-loop with peer:\n' - f'|_{chan}\n' + 'Cancelling RPC-msg-loop with peer\n' + f'->c}} {chan.aid.reprol()}@[{chan.maddr}]\n' ) loop_cs.cancel() break @@ -1018,7 +1070,7 @@ async def process_messages( ): target_cid: str = kwargs['cid'] kwargs |= { - 'requesting_uid': chan.uid, + 'requesting_aid': chan.aid, 'ipc_msg': msg, # XXX NOTE! ONLY the rpc-task-owning @@ -1054,21 +1106,34 @@ async def process_messages( ns=ns, func=funcname, kwargs=kwargs, # type-spec this? see `msg.types` - uid=actorid, + uid=actor_uuid, ): + if actor_uuid != chan.aid.uid: + raise RuntimeError( + f'IPC msg <-> chan.aid mismatch!?\n' + f'Channel.aid = {chan.aid!r}\n' + f'Start.uid = {actor_uuid!r}\n' + ) + # await debug.pause() + op_repr: str = 'Start <=) ' + req_repr: str = _pformat.nest_from_op( + input_op=op_repr, + op_suffix='', + nest_prefix='', + text=f'{chan}', + + nest_indent=len(op_repr)-1, + rm_from_first_ln='<', + # ^XXX, subtract -1 to account for + # > {actor.uid}\n' - f' |_{actor}\n' - f' -> nsp: `{ns}.{funcname}({kwargs})`\n' - - # f' |_{ns}.{funcname}({kwargs})\n\n' - - # f'{pretty_struct.pformat(msg)}\n' + 'Handling RPC request\n' + f'{req_repr}\n' + f'\n' + f'->{{ ipc-context-id: {cid!r}\n' + f'->{{ nsp for fn: `{ns}.{funcname}({kwargs})`\n' ) # runtime-internal endpoint: `Actor.` @@ -1097,10 +1162,6 @@ async def process_messages( await chan.send(err_msg) continue - start_status += ( - f' -> func: {func}\n' - ) - # schedule a task for the requested RPC function # in the actor's main "service nursery". # @@ -1108,10 +1169,10 @@ async def process_messages( # supervision isolation? would avoid having to # manage RPC tasks individually in `._rpc_tasks` # table? 
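
# ------ editor's sketch (illustrative, not part of the patch) ------
# `process_messages()` now resolves its `Actor` from process-local
# runtime state instead of taking it as a parameter; a trimmed stand-in
# for that `_state.current_actor()` accessor pattern.
_current_actor: object|None = None

def current_actor(
    err_on_no_runtime: bool = True,
) -> object|None:
    if (
        _current_actor is None
        and err_on_no_runtime
    ):
        raise RuntimeError('No actor runtime has been started!')
    return _current_actor
# ------ end sketch ------
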
- start_status += ' -> scheduling new task..\n' + start_status += '->( scheduling new task..\n' log.runtime(start_status) try: - ctx: Context = await actor._service_n.start( + ctx: Context = await actor._service_tn.start( partial( _invoke, actor, @@ -1156,7 +1217,7 @@ async def process_messages( trio.Event(), ) - # runtime-scoped remote (internal) error + # XXX RUNTIME-SCOPED! remote (likely internal) error # (^- bc no `Error.cid` -^) # # NOTE: this is the non-rpc error case, that @@ -1192,12 +1253,24 @@ async def process_messages( # END-OF `async for`: # IPC disconnected via `trio.EndOfChannel`, likely # due to a (graceful) `Channel.aclose()`. + + chan_op_repr: str = '<=x] ' + chan_repr: str = _pformat.nest_from_op( + input_op=chan_op_repr, + op_suffix='', + nest_prefix='', + text=chan.pformat(), + nest_indent=len(chan_op_repr)-1, + rm_from_first_ln='<', + ) log.runtime( - f'channel for {chan.uid} disconnected, cancelling RPC tasks\n' - f'|_{chan}\n' + f'IPC channel disconnected\n' + f'{chan_repr}\n' + f'\n' + f'->c) cancelling RPC tasks.\n' ) await actor.cancel_rpc_tasks( - req_uid=actor.uid, + req_aid=actor.aid, # a "self cancel" in terms of the lifetime of the # IPC connection which is presumed to be the # source of any requests for spawned tasks. @@ -1219,8 +1292,10 @@ async def process_messages( # -[ ] figure out how this will break with other transports? tc.report_n_maybe_raise( message=( - f'peer IPC channel closed abruptly?\n\n' - f'<=x {chan}\n' + f'peer IPC channel closed abruptly?\n' + f'\n' + f'<=x[\n' + f' {chan}\n' f' |_{chan.raddr}\n\n' ) + @@ -1237,7 +1312,7 @@ async def process_messages( ) as err: if nursery_cancelled_before_task: - sn: Nursery = actor._service_n + sn: Nursery = actor._service_tn assert sn and sn.cancel_scope.cancel_called # sanity log.cancel( f'Service nursery cancelled before it handled {funcname}' @@ -1267,13 +1342,37 @@ async def process_messages( finally: # msg debugging for when he machinery is brokey if msg is None: - message: str = 'Exiting IPC msg loop without receiving a msg?' + message: str = 'Exiting RPC-loop without receiving a msg?' else: + task_op_repr: str = ')>' + task: trio.Task = trio.lowlevel.current_task() + + # maybe add cancelled opt prefix + if task._cancel_status.effectively_cancelled: + task_op_repr = 'c' + task_op_repr + + task_repr: str = _pformat.nest_from_op( + input_op=task_op_repr, + text=f'{task!r}', + nest_indent=1, + ) + # chan_op_repr: str = '<=} ' + # chan_repr: str = _pformat.nest_from_op( + # input_op=chan_op_repr, + # op_suffix='', + # nest_prefix='', + # text=chan.pformat(), + # nest_indent=len(chan_op_repr)-1, + # rm_from_first_ln='<', + # ) message: str = ( - 'Exiting IPC msg loop with final msg\n\n' - f'<= peer: {chan.uid}\n' - f' |_{chan}\n\n' - # f'{pretty_struct.pformat(msg)}' + f'Exiting RPC-loop with final msg\n' + f'\n' + # f'{chan_repr}\n' + f'{task_repr}\n' + f'\n' + f'{pretty_struct.pformat(msg)}' + f'\n' ) log.runtime(message) diff --git a/tractor/_runtime.py b/tractor/_runtime.py index 890a690a..f18e0d61 100644 --- a/tractor/_runtime.py +++ b/tractor/_runtime.py @@ -35,26 +35,36 @@ for running all lower level spawning, supervision and msging layers: SC-transitive RPC via scheduling of `trio` tasks. - registration of newly spawned actors with the discovery sys. +Glossary: +-------- + - tn: a `trio.Nursery` or "task nursery". + - an: an `ActorNursery` or "actor nursery". + - root: top/parent-most scope/task/process/actor (or other runtime + primitive) in a hierarchical tree. 
+ - parent-ish: "higher-up" in the runtime-primitive hierarchy. + - child-ish: "lower-down" in the runtime-primitive hierarchy. + ''' from __future__ import annotations from contextlib import ( ExitStack, ) -from collections import defaultdict from functools import partial -from itertools import chain import importlib import importlib.util import os +from pathlib import Path from pprint import pformat import signal import sys from typing import ( Any, Callable, + Type, TYPE_CHECKING, ) import uuid +import textwrap from types import ModuleType import warnings @@ -73,7 +83,22 @@ from tractor.msg import ( pretty_struct, types as msgtypes, ) -from ._ipc import Channel +from .trionics import ( + collapse_eg, + maybe_open_nursery, +) +from .ipc import ( + Channel, + # IPCServer, # causes cycles atm.. + _server, +) +from ._addr import ( + UnwrappedAddress, + Address, + # default_lo_addrs, + get_address_cls, + wrap_address, +) from ._context import ( mk_context, Context, @@ -85,18 +110,16 @@ from ._exceptions import ( ModuleNotExposed, MsgTypeError, unpack_error, - TransportClosed, ) -from .devx import _debug +from .devx import ( + debug, + pformat as _pformat +) from ._discovery import get_registry from ._portal import Portal from . import _state from . import _mp_fixup_main -from ._rpc import ( - process_messages, - try_ship_error_to_remote, -) - +from . import _rpc if TYPE_CHECKING: from ._supervise import ActorNursery @@ -106,8 +129,22 @@ if TYPE_CHECKING: log = get_logger('tractor') -def _get_mod_abspath(module): - return os.path.abspath(module.__file__) +def _get_mod_abspath(module: ModuleType) -> Path: + return Path(module.__file__).absolute() + + +def get_mod_nsps2fps(mod_ns_paths: list[str]) -> dict[str, str]: + ''' + Deliver a table of py module namespace-path-`str`s mapped to + their "physical" `.py` file paths in the file-sys. + + ''' + nsp2fp: dict[str, str] = {} + for nsp in mod_ns_paths: + mod: ModuleType = importlib.import_module(nsp) + nsp2fp[nsp] = str(_get_mod_abspath(mod)) + + return nsp2fp class Actor: @@ -146,19 +183,27 @@ class Actor: msg_buffer_size: int = 2**6 - # nursery placeholders filled in by `async_main()` after fork - _root_n: Nursery|None = None - _service_n: Nursery|None = None - _server_n: Nursery|None = None + # nursery placeholders filled in by `async_main()`, + # - after fork for subactors. + # - during boot for the root actor. + _root_tn: Nursery|None = None + _service_tn: Nursery|None = None + _ipc_server: _server.IPCServer|None = None + + @property + def ipc_server(self) -> _server.IPCServer: + ''' + The IPC transport-server for this actor; normally + a process-singleton. + + ''' + return self._ipc_server # Information about `__main__` from parent _parent_main_data: dict[str, str] _parent_chan_cs: CancelScope|None = None _spawn_spec: msgtypes.SpawnSpec|None = None - # syncs for setup/teardown sequences - _server_down: trio.Event|None = None - # if started on ``asycio`` running ``trio`` in guest mode _infected_aio: bool = False @@ -175,15 +220,15 @@ class Actor: def __init__( self, name: str, + uuid: str, *, enable_modules: list[str] = [], - uid: str|None = None, loglevel: str|None = None, - registry_addrs: list[tuple[str, int]]|None = None, + registry_addrs: list[Address]|None = None, spawn_method: str|None = None, # TODO: remove! - arbiter_addr: tuple[str, int]|None = None, + arbiter_addr: UnwrappedAddress|None = None, ) -> None: ''' @@ -191,27 +236,30 @@ class Actor: phase (aka before a new process is executed). 
''' - self.name = name - self.uid = ( - name, - uid or str(uuid.uuid4()) + self._aid = msgtypes.Aid( + name=name, + uuid=uuid, + pid=os.getpid(), ) + self._task: trio.Task|None = None + # state self._cancel_complete = trio.Event() - self._cancel_called_by_remote: tuple[str, tuple]|None = None + self._cancel_called_by: tuple[str, tuple]|None = None self._cancel_called: bool = False # retreive and store parent `__main__` data which # will be passed to children self._parent_main_data = _mp_fixup_main._mp_figure_out_main() + # TODO? only add this when `is_debug_mode() == True` no? # always include debugging tools module - enable_modules.append('tractor.devx._debug') + if _state.is_root_process(): + enable_modules.append('tractor.devx.debug._tty_lock') - self.enable_modules: dict[str, str] = {} - for name in enable_modules: - mod: ModuleType = importlib.import_module(name) - self.enable_modules[name] = _get_mod_abspath(mod) + self.enable_modules: dict[str, str] = get_mod_nsps2fps( + mod_ns_paths=enable_modules, + ) self._mods: dict[str, ModuleType] = {} self.loglevel: str = loglevel @@ -219,25 +267,18 @@ class Actor: if arbiter_addr is not None: warnings.warn( '`Actor(arbiter_addr=)` is now deprecated.\n' - 'Use `registry_addrs: list[tuple]` instead.', + 'Use `registry_addrs: list[Address]` instead.', DeprecationWarning, stacklevel=2, ) - registry_addrs: list[tuple[str, int]] = [arbiter_addr] + + registry_addrs: list[Address] = [wrap_address(arbiter_addr)] # marked by the process spawning backend at startup # will be None for the parent most process started manually # by the user (currently called the "arbiter") self._spawn_method: str = spawn_method - self._peers: defaultdict[ - str, # uaid - list[Channel], # IPC conns from peer - ] = defaultdict(list) - self._peer_connected: dict[tuple[str, str], trio.Event] = {} - self._no_more_peers = trio.Event() - self._no_more_peers.set() - # RPC state self._ongoing_rpc_tasks = trio.Event() self._ongoing_rpc_tasks.set() @@ -256,7 +297,6 @@ class Actor: Context ] = {} - self._listeners: list[trio.abc.Listener] = [] self._parent_chan: Channel|None = None self._forkserver_info: tuple|None = None @@ -269,16 +309,188 @@ class Actor: # when provided, init the registry addresses property from # input via the validator. - self._reg_addrs: list[tuple[str, int]] = [] + self._reg_addrs: list[UnwrappedAddress] = [] if registry_addrs: - self.reg_addrs: list[tuple[str, int]] = registry_addrs - _state._runtime_vars['_registry_addrs'] = registry_addrs + _state._runtime_vars['_registry_addrs'] = self.reg_addrs = [ + addr.unwrap() + for addr in registry_addrs + ] @property - def reg_addrs(self) -> list[tuple[str, int]]: + def aid(self) -> msgtypes.Aid: + ''' + This process-singleton-actor's "unique actor ID" in struct form. + + See the `tractor.msg.Aid` struct for details. + + ''' + return self._aid + + @property + def name(self) -> str: + return self._aid.name + + @property + def uid(self) -> tuple[str, str]: + ''' + This process-singleton's "unique (cross-host) ID". + + Delivered from the `.Aid.name/.uuid` fields as a `tuple` pair + and should be multi-host unique despite a large distributed + process plane. + + ''' + msg: str = ( + f'`{type(self).__name__}.uid` is now deprecated.\n' + 'Use the new `.aid: tractor.msg.Aid` (struct) instead ' + 'which also provides additional named (optional) fields ' + 'beyond just the `.name` and `.uuid`.' 
+ ) + warnings.warn( + msg, + DeprecationWarning, + stacklevel=2, + ) + return ( + self._aid.name, + self._aid.uuid, + ) + + @property + def pid(self) -> int: + return self._aid.pid + + @property + def repr_state(self) -> str: + if self.cancel_complete: + return 'cancelled' + + elif canceller := self.cancel_caller: + return f' and cancel-called by {canceller}' + + else: + return 'running' + + def pformat( + self, + ds: str = ': ', + indent: int = 0, + privates: bool = False, + ) -> str: + + fmtstr: str = f'|_id: {self.aid.reprol()!r}\n' + if privates: + aid_nest_prefix: str = '|_aid=' + aid_field_repr: str = _pformat.nest_from_op( + input_op='', + text=pretty_struct.pformat( + struct=self.aid, + field_indent=2, + ), + op_suffix='', + nest_prefix=aid_nest_prefix, + nest_indent=0, + ) + fmtstr: str = f'{aid_field_repr}' + + if rent_chan := self._parent_chan: + fmtstr += ( + f"|_parent{ds}{rent_chan.aid.reprol()}\n" + ) + + server: _server.IPCServer = self.ipc_server + if server: + if privates: + server_repr: str = self._ipc_server.pformat( + privates=privates, + ) + # create field ln as a key-header indented under + # and up to the section's key prefix. + # ^XXX if we were to indent `repr(Server)` to + # ': ' + # _here_^ + server_repr: str = _pformat.nest_from_op( + input_op='', # nest as sub-obj + op_suffix='', + text=server_repr, + ) + fmtstr += ( + f"{server_repr}" + ) + else: + fmtstr += ( + f'|_ipc: {server.repr_state!r}\n' + ) + + fmtstr += ( + f'|_rpc: {len(self._rpc_tasks)} active tasks\n' + ) + + # TODO, actually fix the .repr_state impl/output? + # append ipc-ctx state summary + # ctxs: dict = self._contexts + # if ctxs: + # ctx_states: dict[str, int] = {} + # for ctx in self._contexts.values(): + # ctx_state: str = ctx.repr_state + # cnt = ctx_states.setdefault(ctx_state, 0) + # ctx_states[ctx_state] = cnt + 1 + + # fmtstr += ( + # f" ctxs{ds}{ctx_states}\n" + # ) + + # runtime-state + task_name: str = '' + if task := self._task: + task_name: str = task.name + fmtstr += ( + # TODO, this just like ctx? + f'|_state: {self.repr_state!r}\n' + f' task: {task_name}\n' + f' loglevel: {self.loglevel!r}\n' + f' subactors_spawned: {len(self._actoruid2nursery)}\n' + ) + if not _state.is_root_process(): + fmtstr += f' spawn_method: {self._spawn_method!r}\n' + + if privates: + fmtstr += ( + # f' actoruid2nursery{ds}{self._actoruid2nursery}\n' + f' cancel_complete{ds}{self._cancel_complete}\n' + f' cancel_called_by_remote{ds}{self._cancel_called_by}\n' + f' cancel_called{ds}{self._cancel_called}\n' + ) + + if fmtstr: + fmtstr: str = textwrap.indent( + text=fmtstr, + prefix=' '*(1 + indent), + ) + + _repr: str = ( + f'<{type(self).__name__}(\n' + f'{fmtstr}' + f')>\n' + ) + if indent: + _repr: str = textwrap.indent( + text=_repr, + prefix=' '*indent, + ) + return _repr + + __repr__ = pformat + + @property + def reg_addrs(self) -> list[UnwrappedAddress]: ''' List of (socket) addresses for all known (and contactable) - registry actors. + registry-service actors in "unwrapped" (i.e. IPC interchange + wire-compat) form. + + If you are looking for the "wrapped" address form, use + `.registry_addrs` instead. ''' return self._reg_addrs @@ -286,7 +498,7 @@ class Actor: @reg_addrs.setter def reg_addrs( self, - addrs: list[tuple[str, int]], + addrs: list[UnwrappedAddress], ) -> None: if not addrs: log.warning( @@ -295,39 +507,16 @@ class Actor: ) return - # always sanity check the input list since it's critical - # that addrs are correct for discovery sys operation. 
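
# ------ editor's sketch (illustrative, not part of the patch) ------
# The deprecation-shim pattern used for `Actor.uid` above: keep the old
# accessor working but point users at the new `.aid` struct via a
# `DeprecationWarning`. Class and field names below are illustrative.
import warnings
from dataclasses import dataclass

@dataclass
class ActorID:
    name: str
    uuid: str

class HasAid:
    def __init__(self, name: str, uuid: str) -> None:
        self._aid = ActorID(name=name, uuid=uuid)

    @property
    def aid(self) -> ActorID:
        return self._aid

    @property
    def uid(self) -> tuple[str, str]:
        warnings.warn(
            '`.uid` is deprecated, use `.aid` instead',
            DeprecationWarning,
            stacklevel=2,
        )
        return (self._aid.name, self._aid.uuid)
# ------ end sketch ------
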
- for addr in addrs: - if not isinstance(addr, tuple): - raise ValueError( - 'Expected `Actor.reg_addrs: list[tuple[str, int]]`\n' - f'Got {addrs}' - ) + self._reg_addrs = addrs - self._reg_addrs = addrs - - async def wait_for_peer( - self, - uid: tuple[str, str], - - ) -> tuple[trio.Event, Channel]: - ''' - Wait for a connection back from a (spawned sub-)actor with - a `uid` using a `trio.Event` for sync. - - ''' - log.debug(f'Waiting for peer {uid!r} to connect') - event = self._peer_connected.setdefault(uid, trio.Event()) - await event.wait() - log.debug(f'{uid!r} successfully connected back to us') - return ( - event, - self._peers[uid][-1], - ) + @property + def registry_addrs(self) -> list[Address]: + return [wrap_address(uw_addr) + for uw_addr in self.reg_addrs] def load_modules( self, - # debug_mode: bool = False, + ) -> None: ''' Load explicitly enabled python modules from local fs after @@ -349,6 +538,9 @@ class Actor: parent_data['init_main_from_path']) status: str = 'Attempting to import enabled modules:\n' + + modpath: str + filepath: str for modpath, filepath in self.enable_modules.items(): # XXX append the allowed module to the python path which # should allow for relative (at least downward) imports. @@ -371,6 +563,14 @@ class Actor: ) raise + # ?TODO, factor this meth-iface into a new `.rpc` subsys primitive? + # - _get_rpc_func(), + # - _deliver_ctx_payload(), + # - get_context(), + # - start_remote_task(), + # - cancel_rpc_tasks(), + # - _cancel_task(), + # def _get_rpc_func(self, ns, funcname): ''' Try to lookup and return a target RPC func from the @@ -400,416 +600,6 @@ class Actor: raise mne - # TODO: maybe change to mod-func and rename for implied - # multi-transport semantics? - async def _stream_handler( - self, - stream: trio.SocketStream, - - ) -> None: - ''' - Entry point for new inbound IPC connections on a specific - transport server. - - ''' - self._no_more_peers = trio.Event() # unset by making new - chan = Channel.from_stream(stream) - con_status: str = ( - 'New inbound IPC connection <=\n' - f'|_{chan}\n' - ) - - # send/receive initial handshake response - try: - uid: tuple|None = await self._do_handshake(chan) - except ( - # we need this for ``msgspec`` for some reason? - # for now, it's been put in the stream backend. - # trio.BrokenResourceError, - # trio.ClosedResourceError, - - TransportClosed, - ): - # XXX: This may propagate up from `Channel._aiter_recv()` - # and `MsgpackStream._inter_packets()` on a read from the - # stream particularly when the runtime is first starting up - # inside `open_root_actor()` where there is a check for - # a bound listener on the "arbiter" addr. the reset will be - # because the handshake was never meant took place. - log.runtime( - con_status - + - ' -> But failed to handshake? Ignoring..\n' - ) - return - - familiar: str = 'new-peer' - if _pre_chan := self._peers.get(uid): - familiar: str = 'pre-existing-peer' - uid_short: str = f'{uid[0]}[{uid[1][-6:]}]' - con_status += ( - f' -> Handshake with {familiar} `{uid_short}` complete\n' - ) - - if _pre_chan: - # con_status += ( - # ^TODO^ swap once we minimize conn duplication - # -[ ] last thing might be reg/unreg runtime reqs? - # log.warning( - log.debug( - f'?Wait?\n' - f'We already have IPC with peer {uid_short!r}\n' - f'|_{_pre_chan}\n' - ) - - # IPC connection tracking for both peers and new children: - # - if this is a new channel to a locally spawned - # sub-actor there will be a spawn wait even registered - # by a call to `.wait_for_peer()`. 
- # - if a peer is connecting no such event will exit. - event: trio.Event|None = self._peer_connected.pop( - uid, - None, - ) - if event: - con_status += ( - ' -> Waking subactor spawn waiters: ' - f'{event.statistics().tasks_waiting}\n' - f' -> Registered IPC chan for child actor {uid}@{chan.raddr}\n' - # f' {event}\n' - # f' |{event.statistics()}\n' - ) - # wake tasks waiting on this IPC-transport "connect-back" - event.set() - - else: - con_status += ( - f' -> Registered IPC chan for peer actor {uid}@{chan.raddr}\n' - ) # type: ignore - - chans: list[Channel] = self._peers[uid] - # if chans: - # # TODO: re-use channels for new connections instead - # # of always new ones? - # # => will require changing all the discovery funcs.. - - # append new channel - # TODO: can we just use list-ref directly? - chans.append(chan) - - con_status += ' -> Entering RPC msg loop..\n' - log.runtime(con_status) - - # Begin channel management - respond to remote requests and - # process received reponses. - disconnected: bool = False - last_msg: MsgType - try: - ( - disconnected, - last_msg, - ) = await process_messages( - self, - chan, - ) - except trio.Cancelled: - log.cancel( - 'IPC transport msg loop was cancelled\n' - f'c)>\n' - f' |_{chan}\n' - ) - raise - - finally: - local_nursery: ( - ActorNursery|None - ) = self._actoruid2nursery.get(uid) - - # This is set in ``Portal.cancel_actor()``. So if - # the peer was cancelled we try to wait for them - # to tear down their side of the connection before - # moving on with closing our own side. - if ( - local_nursery - and ( - self._cancel_called - or - chan._cancel_called - ) - # - # ^-TODO-^ along with this is there another condition - # that we should filter with to avoid entering this - # waiting block needlessly? - # -[ ] maybe `and local_nursery.cancelled` and/or - # only if the `._children` table is empty or has - # only `Portal`s with .chan._cancel_called == - # True` as per what we had below; the MAIN DIFF - # BEING that just bc one `Portal.cancel_actor()` - # was called, doesn't mean the whole actor-nurse - # is gonna exit any time soon right!? - # - # or - # all(chan._cancel_called for chan in chans) - - ): - log.cancel( - 'Waiting on cancel request to peer..\n' - f'c)=>\n' - f' |_{chan.uid}\n' - ) - - # XXX: this is a soft wait on the channel (and its - # underlying transport protocol) to close from the - # remote peer side since we presume that any channel - # which is mapped to a sub-actor (i.e. it's managed - # by local actor-nursery) has a message that is sent - # to the peer likely by this actor (which may be in - # a shutdown sequence due to cancellation) when the - # local runtime here is now cancelled while - # (presumably) in the middle of msg loop processing. - chan_info: str = ( - f'{chan.uid}\n' - f'|_{chan}\n' - f' |_{chan.transport}\n\n' - ) - with trio.move_on_after(0.5) as drain_cs: - drain_cs.shield = True - - # attempt to wait for the far end to close the - # channel and bail after timeout (a 2-generals - # problem on closure). - assert chan.transport - async for msg in chan.transport.drain(): - - # try to deliver any lingering msgs - # before we destroy the channel. - # This accomplishes deterministic - # ``Portal.cancel_actor()`` cancellation by - # making sure any RPC response to that call is - # delivered the local calling task. - # TODO: factor this into a helper? 
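
# ------ editor's sketch (illustrative, not part of the patch) ------
# One shape the "factor this into a helper?" TODO above could take:
# a bounded, shielded drain of any late msgs from a disconnecting peer
# before the channel is dropped. `chan`/`deliver` are duck-typed
# stand-ins for the runtime's channel and payload-delivery APIs.
import trio

async def drain_to_final_msg_window(
    chan,
    deliver,                  # async fn: (msg) -> None
    timeout: float = 0.5,
) -> bool:
    with trio.move_on_after(timeout) as drain_cs:
        drain_cs.shield = True
        async for msg in chan.transport.drain():
            # route any lingering (eg. cancel-ack) responses to their
            # local waiters before teardown.
            await deliver(msg)
    return drain_cs.cancelled_caught  # True -> timed out draining
# ------ end sketch ------
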
- log.warning( - 'Draining msg from disconnected peer\n' - f'{chan_info}' - f'{pformat(msg)}\n' - ) - # cid: str|None = msg.get('cid') - cid: str|None = msg.cid - if cid: - # deliver response to local caller/waiter - await self._deliver_ctx_payload( - chan, - cid, - msg, - ) - if drain_cs.cancelled_caught: - log.warning( - 'Timed out waiting on IPC transport channel to drain?\n' - f'{chan_info}' - ) - - # XXX NOTE XXX when no explicit call to - # `open_root_actor()` was made by the application - # (normally we implicitly make that call inside - # the first `.open_nursery()` in root-actor - # user/app code), we can assume that either we - # are NOT the root actor or are root but the - # runtime was started manually. and thus DO have - # to wait for the nursery-enterer to exit before - # shutting down the local runtime to avoid - # clobbering any ongoing subactor - # teardown/debugging/graceful-cancel. - # - # see matching note inside `._supervise.open_nursery()` - # - # TODO: should we have a separate cs + timeout - # block here? - if ( - # XXX SO either, - # - not root OR, - # - is root but `open_root_actor()` was - # entered manually (in which case we do - # the equiv wait there using the - # `devx._debug` sub-sys APIs). - not local_nursery._implicit_runtime_started - ): - log.runtime( - 'Waiting on local actor nursery to exit..\n' - f'|_{local_nursery}\n' - ) - with trio.move_on_after(0.5) as an_exit_cs: - an_exit_cs.shield = True - await local_nursery.exited.wait() - - # TODO: currently this is always triggering for every - # sub-daemon spawned from the `piker.services._mngr`? - # -[ ] how do we ensure that the IPC is supposed to - # be long lived and isn't just a register? - # |_ in the register case how can we signal that the - # ephemeral msg loop was intentional? - if ( - # not local_nursery._implicit_runtime_started - # and - an_exit_cs.cancelled_caught - ): - report: str = ( - 'Timed out waiting on local actor-nursery to exit?\n' - f'c)>\n' - f' |_{local_nursery}\n' - ) - if children := local_nursery._children: - # indent from above local-nurse repr - report += ( - f' |_{pformat(children)}\n' - ) - - log.warning(report) - - if disconnected: - # if the transport died and this actor is still - # registered within a local nursery, we report - # that the IPC layer may have failed - # unexpectedly since it may be the cause of - # other downstream errors. - entry: tuple|None = local_nursery._children.get(uid) - if entry: - proc: trio.Process - _, proc, _ = entry - - if ( - (poll := getattr(proc, 'poll', None)) - and - poll() is None # proc still alive - ): - # TODO: change log level based on - # detecting whether chan was created for - # ephemeral `.register_actor()` request! - # -[ ] also, that should be avoidable by - # re-using any existing chan from the - # `._discovery.get_registry()` call as - # well.. - log.runtime( - f'Peer IPC broke but subproc is alive?\n\n' - - f'<=x {chan.uid}@{chan.raddr}\n' - f' |_{proc}\n' - ) - - # ``Channel`` teardown and closure sequence - # drop ref to channel so it can be gc-ed and disconnected - con_teardown_status: str = ( - f'IPC channel disconnected:\n' - f'<=x uid: {chan.uid}\n' - f' |_{pformat(chan)}\n\n' - ) - chans.remove(chan) - - # TODO: do we need to be this pedantic? 
- if not chans: - con_teardown_status += ( - f'-> No more channels with {chan.uid}' - ) - self._peers.pop(uid, None) - - peers_str: str = '' - for uid, chans in self._peers.items(): - peers_str += ( - f'uid: {uid}\n' - ) - for i, chan in enumerate(chans): - peers_str += ( - f' |_[{i}] {pformat(chan)}\n' - ) - - con_teardown_status += ( - f'-> Remaining IPC {len(self._peers)} peers: {peers_str}\n' - ) - - # No more channels to other actors (at all) registered - # as connected. - if not self._peers: - con_teardown_status += ( - 'Signalling no more peer channel connections' - ) - self._no_more_peers.set() - - # NOTE: block this actor from acquiring the - # debugger-TTY-lock since we have no way to know if we - # cancelled it and further there is no way to ensure the - # lock will be released if acquired due to having no - # more active IPC channels. - if _state.is_root_process(): - pdb_lock = _debug.Lock - pdb_lock._blocked.add(uid) - - # TODO: NEEEDS TO BE TESTED! - # actually, no idea if this ever even enters.. XD - # - # XXX => YES IT DOES, when i was testing ctl-c - # from broken debug TTY locking due to - # msg-spec races on application using RunVar... - if ( - (ctx_in_debug := pdb_lock.ctx_in_debug) - and - (pdb_user_uid := ctx_in_debug.chan.uid) - and - local_nursery - ): - entry: tuple|None = local_nursery._children.get( - tuple(pdb_user_uid) - ) - if entry: - proc: trio.Process - _, proc, _ = entry - - if ( - (poll := getattr(proc, 'poll', None)) - and poll() is None - ): - log.cancel( - 'Root actor reports no-more-peers, BUT\n' - 'a DISCONNECTED child still has the debug ' - 'lock!\n\n' - # f'root uid: {self.uid}\n' - f'last disconnected child uid: {uid}\n' - f'locking child uid: {pdb_user_uid}\n' - ) - await _debug.maybe_wait_for_debugger( - child_in_debug=True - ) - - # TODO: just bc a child's transport dropped - # doesn't mean it's not still using the pdb - # REPL! so, - # -[ ] ideally we can check out child proc - # tree to ensure that its alive (and - # actually using the REPL) before we cancel - # it's lock acquire by doing the below! - # -[ ] create a way to read the tree of each actor's - # grandchildren such that when an - # intermediary parent is cancelled but their - # child has locked the tty, the grandparent - # will not allow the parent to cancel or - # zombie reap the child! see open issue: - # - https://github.com/goodboy/tractor/issues/320 - # ------ - ------ - # if a now stale local task has the TTY lock still - # we cancel it to allow servicing other requests for - # the lock. - if ( - (db_cs := pdb_lock.get_locking_task_cs()) - and not db_cs.cancel_called - and uid == pdb_user_uid - ): - log.critical( - f'STALE DEBUG LOCK DETECTED FOR {uid}' - ) - # TODO: figure out why this breaks tests.. - db_cs.cancel() - - log.runtime(con_teardown_status) - # finally block closure - # TODO: rename to `._deliver_payload()` since this handles # more then just `result` msgs now obvi XD async def _deliver_ctx_payload( @@ -824,11 +614,11 @@ class Actor: queue. ''' - uid: tuple[str, str] = chan.uid - assert uid, f"`chan.uid` can't be {uid}" + aid: msgtypes.Aid = chan.aid + assert aid, f"`chan.aid` can't be {aid}" try: ctx: Context = self._contexts[( - uid, + aid.uid, cid, # TODO: how to determine this tho? @@ -839,7 +629,7 @@ class Actor: 'Ignoring invalid IPC msg!?\n' f'Ctx seems to not/no-longer exist??\n' f'\n' - f'<=? {uid}\n' + f'<=? {aid.reprol()!r}\n' f' |_{pretty_struct.pformat(msg)}\n' ) match msg: @@ -888,6 +678,7 @@ class Actor: msging session's lifetime. 
''' + # ?TODO, use Aid here as well? actor_uid = chan.uid assert actor_uid try: @@ -1024,11 +815,12 @@ class Actor: async def _from_parent( self, - parent_addr: tuple[str, int]|None, + parent_addr: UnwrappedAddress|None, ) -> tuple[ Channel, - list[tuple[str, int]]|None, + list[UnwrappedAddress]|None, + list[str]|None, # preferred tpts ]: ''' Bootstrap this local actor's runtime config from its parent by @@ -1040,35 +832,67 @@ class Actor: # Connect back to the parent actor and conduct initial # handshake. From this point on if we error, we # attempt to ship the exception back to the parent. - chan = Channel( - destaddr=parent_addr, + chan = await Channel.from_addr( + addr=wrap_address(parent_addr) ) - await chan.connect() + assert isinstance(chan, Channel) - # TODO: move this into a `Channel.handshake()`? - # Initial handshake: swap names. - await self._do_handshake(chan) + # init handshake: swap actor-IDs. + await chan._do_handshake(aid=self.aid) - accept_addrs: list[tuple[str, int]]|None = None + accept_addrs: list[UnwrappedAddress]|None = None if self._spawn_method == "trio": # Receive post-spawn runtime state from our parent. spawnspec: msgtypes.SpawnSpec = await chan.recv() - self._spawn_spec = spawnspec + match spawnspec: + case MsgTypeError(): + raise spawnspec + case msgtypes.SpawnSpec(): + self._spawn_spec = spawnspec + log.runtime( + 'Received runtime spec from parent:\n\n' - log.runtime( - 'Received runtime spec from parent:\n\n' + # TODO: eventually all these msgs as + # `msgspec.Struct` with a special mode that + # pformats them in multi-line mode, BUT only + # if "trace"/"util" mode is enabled? + f'{pretty_struct.pformat(spawnspec)}\n' + ) - # TODO: eventually all these msgs as - # `msgspec.Struct` with a special mode that - # pformats them in multi-line mode, BUT only - # if "trace"/"util" mode is enabled? - f'{pretty_struct.pformat(spawnspec)}\n' - ) - accept_addrs: list[tuple[str, int]] = spawnspec.bind_addrs + case _: + raise InternalError( + f'Received invalid non-`SpawnSpec` payload !?\n' + f'{spawnspec}\n' + ) + # ^^XXX TODO XXX^^^ + # when the `SpawnSpec` fails to decode the above will + # raise a `MsgTypeError` which if we do NOT ALSO + # RAISE it will tried to be pprinted in the + # log.runtime() below.. + # + # SO we gotta look at how other `chan.recv()` calls + # are wrapped and do the same for this spec receive! + # -[ ] see `._rpc` likely has the answer? - # TODO: another `Struct` for rtvs.. + # ^^^XXX NOTE XXX^^^, can't be called here! + # + # breakpoint() + # import pdbp; pdbp.set_trace() + # + # => bc we haven't yet received the + # `spawnspec._runtime_vars` which contains + # `debug_mode: bool`.. + + # `SpawnSpec.bind_addrs` + # --------------------- + accept_addrs: list[UnwrappedAddress] = spawnspec.bind_addrs + + # `SpawnSpec._runtime_vars` + # ------------------------- + # => update process-wide globals + # TODO! -[ ] another `Struct` for rtvs.. rvs: dict[str, Any] = spawnspec._runtime_vars if rvs['_debug_mode']: from .devx import ( @@ -1126,18 +950,20 @@ class Actor: f'self._infected_aio = {aio_attr}\n' ) if aio_rtv: - assert trio_runtime.GLOBAL_RUN_CONTEXT.runner.is_guest - # ^TODO^ possibly add a `sniffio` or - # `trio` pub-API for `is_guest_mode()`? + assert ( + trio_runtime.GLOBAL_RUN_CONTEXT.runner.is_guest + # and + # ^TODO^ possibly add a `sniffio` or + # `trio` pub-API for `is_guest_mode()`? 
+ ) rvs['_is_root'] = False # obvi XD - # update process-wide globals _state._runtime_vars.update(rvs) - # XXX: ``msgspec`` doesn't support serializing tuples - # so just cash manually here since it's what our - # internals expect. + # `SpawnSpec.reg_addrs` + # --------------------- + # => update parent provided registrar contact info # self.reg_addrs = [ # TODO: we don't really NEED these as tuples? @@ -1148,82 +974,45 @@ class Actor: for val in spawnspec.reg_addrs ] - # TODO: better then monkey patching.. - # -[ ] maybe read the actual f#$-in `._spawn_spec` XD - for _, attr, value in pretty_struct.iter_fields( - spawnspec, - ): - setattr(self, attr, value) + # `SpawnSpec.enable_modules` + # --------------------- + # => extend RPC-python-module (capabilities) with + # those permitted by parent. + # + # NOTE, only the root actor should have + # a pre-permitted entry for `.devx.debug._tty_lock`. + assert not self.enable_modules + self.enable_modules.update( + spawnspec.enable_modules + ) + + self._parent_main_data = spawnspec._parent_main_data + # XXX QUESTION(s)^^^ + # -[ ] already set in `.__init__()` right, but how is + # it diff from this blatant parent copy? + # -[ ] do we need/want the .__init__() value in + # just the root case orr? return ( chan, accept_addrs, + _state._runtime_vars['_enable_tpts'] ) - except OSError: # failed to connect + # failed to connect back? + except ( + OSError, + ConnectionError, + ): log.warning( f'Failed to connect to spawning parent actor!?\n' + f'\n' f'x=> {parent_addr}\n' - f'|_{self}\n\n' + f' |_{self}\n\n' ) await self.cancel(req_chan=None) # self cancel raise - async def _serve_forever( - self, - handler_nursery: Nursery, - *, - # (host, port) to bind for channel server - listen_sockaddrs: list[tuple[str, int]]|None = None, - - task_status: TaskStatus[Nursery] = trio.TASK_STATUS_IGNORED, - ) -> None: - ''' - Start the IPC transport server, begin listening for new connections. - - This will cause an actor to continue living (and thus - blocking at the process/OS-thread level) until - `.cancel_server()` is called. - - ''' - if listen_sockaddrs is None: - listen_sockaddrs = [(None, 0)] - - self._server_down = trio.Event() - try: - async with trio.open_nursery() as server_n: - - for host, port in listen_sockaddrs: - listeners: list[trio.abc.Listener] = await server_n.start( - partial( - trio.serve_tcp, - - handler=self._stream_handler, - port=port, - host=host, - - # NOTE: configured such that new - # connections will stay alive even if - # this server is cancelled! - handler_nursery=handler_nursery, - ) - ) - sockets: list[trio.socket] = [ - getattr(listener, 'socket', 'unknown socket') - for listener in listeners - ] - log.runtime( - 'Started TCP server(s)\n' - f'|_{sockets}\n' - ) - self._listeners.extend(listeners) - - task_status.started(server_n) - - finally: - # signal the server is down since nursery above terminated - self._server_down.set() - def cancel_soon(self) -> None: ''' Cancel this actor asap; can be called from a sync context. @@ -1232,12 +1021,64 @@ class Actor: the RPC service nursery. 
''' - assert self._service_n - self._service_n.start_soon( + actor_repr: str = _pformat.nest_from_op( + input_op='>c(', + text=self.pformat(), + nest_indent=1, + ) + log.cancel( + 'Actor.cancel_soon()` was called!\n' + f'>> scheduling `Actor.cancel()`\n' + f'{actor_repr}' + ) + assert self._service_tn + self._service_tn.start_soon( self.cancel, None, # self cancel all rpc tasks ) + # schedule a "canceller task" in the `._root_tn` once the + # `._service_tn` is fully shutdown; task waits for child-ish + # scopes to fully exit then finally cancels its parent, + # root-most, scope. + async def cancel_root_tn_after_services(): + log.runtime( + 'Waiting on service-tn to cancel..\n' + f'c>)\n' + f'|_{self._service_tn.cancel_scope!r}\n' + ) + await self._cancel_complete.wait() + log.cancel( + f'`._service_tn` cancelled\n' + f'>c)\n' + f'|_{self._service_tn.cancel_scope!r}\n' + f'\n' + f'>> cancelling `._root_tn`\n' + f'c>(\n' + f' |_{self._root_tn.cancel_scope!r}\n' + ) + self._root_tn.cancel_scope.cancel() + + self._root_tn.start_soon( + cancel_root_tn_after_services + ) + + @property + def cancel_complete(self) -> bool: + return self._cancel_complete.is_set() + + @property + def cancel_called(self) -> bool: + ''' + Was this actor requested to cancel by a remote peer actor. + + ''' + return self._cancel_called_by is not None + + @property + def cancel_caller(self) -> msgtypes.Aid|None: + return self._cancel_called_by + async def cancel( self, @@ -1262,20 +1103,18 @@ class Actor: ''' ( - requesting_uid, - requester_type, + requesting_aid, # Aid + requester_type, # str req_chan, log_meth, ) = ( - req_chan.uid, + req_chan.aid, 'peer', req_chan, log.cancel, - ) if req_chan else ( - # a self cancel of ALL rpc tasks - self.uid, + self.aid, 'self', self, log.runtime, @@ -1283,14 +1122,14 @@ class Actor: # TODO: just use the new `Context.repr_rpc: str` (and # other) repr fields instead of doing this all manual.. msg: str = ( - f'Actor-runtime cancel request from {requester_type}\n\n' - f'<=c) {requesting_uid}\n' - f' |_{self}\n' + f'Actor-runtime cancel request from {requester_type!r}\n' f'\n' + f'<=c)\n' + f'{self}' ) # TODO: what happens here when we self-cancel tho? - self._cancel_called_by_remote: tuple = requesting_uid + self._cancel_called_by: tuple = requesting_aid self._cancel_called = True # cancel all ongoing rpc tasks @@ -1298,7 +1137,7 @@ class Actor: # kill any debugger request task to avoid deadlock # with the root actor in this tree - debug_req = _debug.DebugStatus + debug_req = debug.DebugStatus lock_req_ctx: Context = debug_req.req_ctx if ( lock_req_ctx @@ -1308,7 +1147,7 @@ class Actor: msg += ( f'\n' f'-> Cancelling active debugger request..\n' - f'|_{_debug.Lock.repr()}\n\n' + f'|_{debug.Lock.repr()}\n\n' f'|_{lock_req_ctx}\n\n' ) # lock_req_ctx._scope.cancel() @@ -1318,40 +1157,28 @@ class Actor: # self-cancel **all** ongoing RPC tasks await self.cancel_rpc_tasks( - req_uid=requesting_uid, + req_aid=requesting_aid, parent_chan=None, ) # stop channel server - self.cancel_server() - if self._server_down is not None: - await self._server_down.wait() - else: - log.warning( - 'Transport[TCP] server was cancelled start?' - ) + if ipc_server := self.ipc_server: + ipc_server.cancel() + await ipc_server.wait_for_shutdown() # cancel all rpc tasks permanently - if self._service_n: - self._service_n.cancel_scope.cancel() + if self._service_tn: + self._service_tn.cancel_scope.cancel() log_meth(msg) self._cancel_complete.set() return True - # XXX: hard kill logic if needed? 
- # def _hard_mofo_kill(self): - # # If we're the root actor or zombied kill everything - # if self._parent_chan is None: # TODO: more robust check - # root = trio.lowlevel.current_root_task() - # for n in root.child_nurseries: - # n.cancel_scope.cancel() - async def _cancel_task( self, cid: str, parent_chan: Channel, - requesting_uid: tuple[str, str]|None, + requesting_aid: msgtypes.Aid|None, ipc_msg: dict|None|bool = False, @@ -1389,7 +1216,7 @@ class Actor: log.runtime( 'Cancel request for invalid RPC task.\n' 'The task likely already completed or was never started!\n\n' - f'<= canceller: {requesting_uid}\n' + f'<= canceller: {requesting_aid}\n' f'=> {cid}@{parent_chan.uid}\n' f' |_{parent_chan}\n' ) @@ -1397,9 +1224,12 @@ class Actor: log.cancel( 'Rxed cancel request for RPC task\n' - f'<=c) {requesting_uid}\n' - f' |_{ctx._task}\n' - f' >> {ctx.repr_rpc}\n' + f'{ctx._task!r} <=c) {requesting_aid}\n' + f'|_>> {ctx.repr_rpc}\n' + + # f'|_{ctx._task}\n' + # f' >> {ctx.repr_rpc}\n' + # f'=> {ctx._task}\n' # f' >> Actor._cancel_task() => {ctx._task}\n' # f' |_ {ctx._task}\n\n' @@ -1420,9 +1250,9 @@ class Actor: ) if ( ctx._canceller is None - and requesting_uid + and requesting_aid ): - ctx._canceller: tuple = requesting_uid + ctx._canceller: tuple = requesting_aid.uid # TODO: pack the RPC `{'cmd': }` msg into a ctxc and # then raise and pack it here? @@ -1448,7 +1278,7 @@ class Actor: # wait for _invoke to mark the task complete flow_info: str = ( - f'<= canceller: {requesting_uid}\n' + f'<= canceller: {requesting_aid}\n' f'=> ipc-parent: {parent_chan}\n' f'|_{ctx}\n' ) @@ -1465,7 +1295,7 @@ class Actor: async def cancel_rpc_tasks( self, - req_uid: tuple[str, str], + req_aid: msgtypes.Aid, # NOTE: when None is passed we cancel **all** rpc # tasks running in this actor! @@ -1475,14 +1305,14 @@ class Actor: ''' Cancel all ongoing RPC tasks owned/spawned for a given `parent_chan: Channel` or simply all tasks (inside - `._service_n`) when `parent_chan=None`. + `._service_tn`) when `parent_chan=None`. ''' tasks: dict = self._rpc_tasks if not tasks: log.runtime( 'Actor has no cancellable RPC tasks?\n' - f'<= canceller: {req_uid}\n' + f'<= canceller: {req_aid.reprol()}\n' ) return @@ -1522,7 +1352,7 @@ class Actor: ) log.cancel( f'Cancelling {descr} RPC tasks\n\n' - f'<=c) {req_uid} [canceller]\n' + f'<=c) {req_aid} [canceller]\n' f'{rent_chan_repr}' f'c)=> {self.uid} [cancellee]\n' f' |_{self} [with {len(tasks)} tasks]\n' @@ -1550,7 +1380,7 @@ class Actor: await self._cancel_task( cid, task_caller_chan, - requesting_uid=req_uid, + requesting_aid=req_aid, ) if tasks: @@ -1560,103 +1390,31 @@ class Actor: ) await self._ongoing_rpc_tasks.wait() - def cancel_server(self) -> bool: - ''' - Cancel the internal IPC transport server nursery thereby - preventing any new inbound IPC connections establishing. - - ''' - if self._server_n: - # TODO: obvi a different server type when we eventually - # support some others XD - server_prot: str = 'TCP' - log.runtime( - f'Cancelling {server_prot} server' - ) - self._server_n.cancel_scope.cancel() - return True - - return False - @property - def accept_addrs(self) -> list[tuple[str, int]]: + def accept_addrs(self) -> list[UnwrappedAddress]: ''' All addresses to which the transport-channel server binds and listens for new connections. 
''' - # throws OSError on failure - return [ - listener.socket.getsockname() - for listener in self._listeners - ] # type: ignore + return self._ipc_server.accept_addrs @property - def accept_addr(self) -> tuple[str, int]: + def accept_addr(self) -> UnwrappedAddress: ''' Primary address to which the IPC transport server is bound and listening for new connections. ''' - # throws OSError on failure return self.accept_addrs[0] - def get_parent(self) -> Portal: - ''' - Return a `Portal` to our parent. - - ''' - assert self._parent_chan, "No parent channel for this actor?" - return Portal(self._parent_chan) - - def get_chans( - self, - uid: tuple[str, str], - - ) -> list[Channel]: - ''' - Return all IPC channels to the actor with provided `uid`. - - ''' - return self._peers[uid] - - # TODO: move to `Channel.handshake(uid)` - async def _do_handshake( - self, - chan: Channel - - ) -> msgtypes.Aid: - ''' - Exchange `(name, UUIDs)` identifiers as the first - communication step with any (peer) remote `Actor`. - - These are essentially the "mailbox addresses" found in - "actor model" parlance. - - ''' - name, uuid = self.uid - await chan.send( - msgtypes.Aid( - name=name, - uuid=uuid, - ) - ) - aid: msgtypes.Aid = await chan.recv() - chan.aid = aid - - uid: tuple[str, str] = ( - # str(value[0]), - # str(value[1]) - aid.name, - aid.uuid, - ) - - if not isinstance(uid, tuple): - raise ValueError(f"{uid} is not a valid uid?!") - - chan.uid = uid - return uid - + # TODO, this should delegate ONLY to the + # `._spawn_spec._runtime_vars: dict` / `._state` APIs? + # + # XXX, AH RIGHT that's why.. + # it's bc we pass this as a CLI flag to the child.py precisely + # bc we need the bootstrapping pre `async_main()`.. but maybe + # keep this as an impl deat and not part of the pub iface impl? def is_infected_aio(self) -> bool: ''' If `True`, this actor is running `trio` in guest mode on @@ -1667,10 +1425,27 @@ class Actor: ''' return self._infected_aio + # ?TODO, is this the right type for this method? + def get_parent(self) -> Portal: + ''' + Return a `Portal` to our parent. + + ''' + assert self._parent_chan, "No parent channel for this actor?" + return Portal(self._parent_chan) + + # XXX: hard kill logic if needed? + # def _hard_mofo_kill(self): + # # If we're the root actor or zombied kill everything + # if self._parent_chan is None: # TODO: more robust check + # root = trio.lowlevel.current_root_task() + # for n in root.child_nurseries: + # n.cancel_scope.cancel() + async def async_main( actor: Actor, - accept_addrs: tuple[str, int]|None = None, + accept_addrs: UnwrappedAddress|None = None, # XXX: currently ``parent_addr`` is only needed for the # ``multiprocessing`` backend (which pickles state sent to @@ -1679,7 +1454,7 @@ async def async_main( # change this to a simple ``is_subactor: bool`` which will # be False when running as root actor and True when as # a subactor. - parent_addr: tuple[str, int]|None = None, + parent_addr: UnwrappedAddress|None = None, task_status: TaskStatus[None] = trio.TASK_STATUS_IGNORED, ) -> None: @@ -1694,22 +1469,32 @@ async def async_main( the actor's "runtime" and all thus all ongoing RPC tasks. ''' + # XXX NOTE, `_state._current_actor` **must** be set prior to + # calling this core runtime entrypoint! + assert actor is _state.current_actor() + + actor._task: trio.Task = trio.lowlevel.current_task() + # attempt to retreive ``trio``'s sigint handler and stash it # on our debugger state. 
- _debug.DebugStatus._trio_handler = signal.getsignal(signal.SIGINT) + debug.DebugStatus._trio_handler = signal.getsignal(signal.SIGINT) is_registered: bool = False try: # establish primary connection with immediate parent actor._parent_chan: Channel|None = None - if parent_addr is not None: + # is this a sub-actor? + # get runtime info from parent. + if parent_addr is not None: ( actor._parent_chan, set_accept_addr_says_rent, + maybe_preferred_transports_says_rent, ) = await actor._from_parent(parent_addr) + accept_addrs: list[UnwrappedAddress] = [] # either it's passed in because we're not a child or # because we're running in mp mode if ( @@ -1718,26 +1503,69 @@ async def async_main( set_accept_addr_says_rent is not None ): accept_addrs = set_accept_addr_says_rent + else: + enable_transports: list[str] = ( + maybe_preferred_transports_says_rent + or + [_state._def_tpt_proto] + ) + for transport_key in enable_transports: + transport_cls: Type[Address] = get_address_cls( + transport_key + ) + addr: Address = transport_cls.get_random() + accept_addrs.append(addr.unwrap()) - # The "root" nursery ensures the channel with the immediate - # parent is kept alive as a resilient service until - # cancellation steps have (mostly) occurred in - # a deterministic way. - async with trio.open_nursery( - strict_exception_groups=False, - ) as root_nursery: - actor._root_n = root_nursery - assert actor._root_n + assert accept_addrs - async with trio.open_nursery( - strict_exception_groups=False, - ) as service_nursery: - # This nursery is used to handle all inbound - # connections to us such that if the TCP server - # is killed, connections can continue to process - # in the background until this nursery is cancelled. - actor._service_n = service_nursery - assert actor._service_n + ya_root_tn: bool = bool(actor._root_tn) + ya_service_tn: bool = bool(actor._service_tn) + + # NOTE, a top-most "root" nursery in each actor-process + # enables a lifetime priority for the IPC-channel connection + # with a sub-actor's immediate parent. I.e. this connection + # is kept alive as a resilient service connection until all + # other machinery has exited, cancellation of all + # embedded/child scopes have completed. This helps ensure + # a deterministic (and thus "graceful") + # first-class-supervision style teardown where a parent actor + # (vs. say peers) is always the last to be contacted before + # disconnect. + root_tn: trio.Nursery + async with ( + collapse_eg(), + maybe_open_nursery( + nursery=actor._root_tn, + ) as root_tn, + ): + if ya_root_tn: + assert root_tn is actor._root_tn + else: + actor._root_tn = root_tn + + ipc_server: _server.IPCServer + async with ( + collapse_eg(), + maybe_open_nursery( + nursery=actor._service_tn, + ) as service_tn, + _server.open_ipc_server( + parent_tn=service_tn, # ?TODO, why can't this be the root-tn + stream_handler_tn=service_tn, + ) as ipc_server, + + ): + if ya_service_tn: + assert service_tn is actor._service_tn + else: + # This nursery is used to handle all inbound + # connections to us such that if the TCP server + # is killed, connections can continue to process + # in the background until this nursery is cancelled. 
+ actor._service_tn = service_tn + + # set after allocate + actor._ipc_server = ipc_server # load exposed/allowed RPC modules # XXX: do this **after** establishing a channel to the parent @@ -1753,7 +1581,7 @@ async def async_main( # try: # actor.load_modules() # except ModuleNotFoundError as err: - # _debug.pause_from_sync() + # debug.pause_from_sync() # import pdbp; pdbp.set_trace() # raise @@ -1761,42 +1589,40 @@ async def async_main( # - subactor: the bind address is sent by our parent # over our established channel # - root actor: the ``accept_addr`` passed to this method - assert accept_addrs + # TODO: why is this not with the root nursery? + # - see above that the `._service_tn` is what's used? try: - # TODO: why is this not with the root nursery? - actor._server_n = await service_nursery.start( - partial( - actor._serve_forever, - service_nursery, - listen_sockaddrs=accept_addrs, - ) + eps: list = await ipc_server.listen_on( + accept_addrs=accept_addrs, + stream_handler_nursery=service_tn, ) + log.runtime( + f'Booted IPC server\n' + f'{ipc_server}\n' + ) + assert ( + (eps[0].listen_tn) + is not service_tn + ) + except OSError as oserr: # NOTE: always allow runtime hackers to debug # tranport address bind errors - normally it's # something silly like the wrong socket-address # passed via a config or CLI Bo - entered_debug: bool = await _debug._maybe_enter_pm(oserr) + entered_debug: bool = await debug._maybe_enter_pm( + oserr, + ) if not entered_debug: - log.exception('Failed to init IPC channel server !?\n') + log.exception('Failed to init IPC server !?\n') else: log.runtime('Exited debug REPL..') raise - accept_addrs: list[tuple[str, int]] = actor.accept_addrs - - # NOTE: only set the loopback addr for the - # process-tree-global "root" mailbox since - # all sub-actors should be able to speak to - # their root actor over that channel. - if _state._runtime_vars['_is_root']: - for addr in accept_addrs: - host, _ = addr - # TODO: generic 'lo' detector predicate - if '127.0.0.1' in host: - _state._runtime_vars['_root_mailbox'] = addr + # TODO, just read direct from ipc_server? + accept_addrs: list[UnwrappedAddress] = actor.accept_addrs # Register with the arbiter if we're told its addr log.runtime( @@ -1810,24 +1636,24 @@ async def async_main( # only on unique actor uids? for addr in actor.reg_addrs: try: - assert isinstance(addr, tuple) - assert addr[1] # non-zero after bind + waddr = wrap_address(addr) + assert waddr.is_valid except AssertionError: - await _debug.pause() + await debug.pause() - async with get_registry(*addr) as reg_portal: + # !TODO, get rid of the local-portal crap XD + async with get_registry(addr) as reg_portal: for accept_addr in accept_addrs: + accept_addr = wrap_address(accept_addr) - if not accept_addr[1]: - await _debug.pause() - - assert accept_addr[1] + if not accept_addr.is_valid: + breakpoint() await reg_portal.run_from_ns( 'self', 'register_actor', uid=actor.uid, - sockaddr=accept_addr, + addr=accept_addr.unwrap(), ) is_registered: bool = True @@ -1840,11 +1666,10 @@ async def async_main( # start processing parent requests until our channel # server is 100% up and running. 
if actor._parent_chan: - await root_nursery.start( + await root_tn.start( partial( - process_messages, - actor, - actor._parent_chan, + _rpc.process_messages, + chan=actor._parent_chan, shield=True, ) ) @@ -1853,8 +1678,9 @@ async def async_main( # 'Blocking on service nursery to exit..\n' ) log.runtime( - "Service nursery complete\n" - "Waiting on root nursery to complete" + 'Service nursery complete\n' + '\n' + '->} waiting on root nursery to complete..\n' ) # Blocks here as expected until the root nursery is @@ -1885,7 +1711,7 @@ async def async_main( log.exception(err_report) if actor._parent_chan: - await try_ship_error_to_remote( + await _rpc.try_ship_error_to_remote( actor._parent_chan, internal_err, ) @@ -1909,6 +1735,7 @@ async def async_main( finally: teardown_report: str = ( 'Main actor-runtime task completed\n' + '\n' ) # ?TODO? should this be in `._entry`/`._root` mods instead? @@ -1932,34 +1759,36 @@ async def async_main( # prevents any `infected_aio` actor from continuing # and any callbacks in the `ls` here WILL NOT be # called!! - # await _debug.pause(shield=True) + # await debug.pause(shield=True) ls.close() # XXX TODO but hard XXX # we can't actually do this bc the debugger uses the - # _service_n to spawn the lock task, BUT, in theory if we had + # _service_tn to spawn the lock task, BUT, in theory if we had # the root nursery surround this finally block it might be # actually possible to debug THIS machinery in the same way # as user task code? # # if actor.name == 'brokerd.ib': # with CancelScope(shield=True): - # await _debug.breakpoint() + # await debug.breakpoint() # Unregister actor from the registry-sys / registrar. if ( is_registered - and not actor.is_registrar + and + not actor.is_registrar ): failed: bool = False for addr in actor.reg_addrs: - assert isinstance(addr, tuple) + waddr = wrap_address(addr) + assert waddr.is_valid with trio.move_on_after(0.5) as cs: cs.shield = True try: async with get_registry( - *addr, + addr, ) as reg_portal: await reg_portal.run_from_ns( 'self', @@ -1978,38 +1807,47 @@ async def async_main( ) # Ensure all peers (actors connected to us as clients) are finished - if not actor._no_more_peers.is_set(): - if any( - chan.connected() for chan in chain(*actor._peers.values()) - ): - teardown_report += ( - f'-> Waiting for remaining peers {actor._peers} to clear..\n' - ) - log.runtime(teardown_report) - with CancelScope(shield=True): - await actor._no_more_peers.wait() + if ( + (ipc_server := actor.ipc_server) + and + ipc_server.has_peers(check_chans=True) + ): + teardown_report += ( + f'-> Waiting for remaining peers to clear..\n' + f' {pformat(ipc_server._peers)}' + ) + log.runtime(teardown_report) + await ipc_server.wait_for_no_more_peers() teardown_report += ( - '-> All peer channels are complete\n' + '-]> all peer channels are complete.\n' ) + # op_nested_actor_repr: str = _pformat.nest_from_op( + # input_op=')>', + # text=actor.pformat(), + # nest_prefix='|_', + # nest_indent=1, # under > + # ) teardown_report += ( - 'Actor runtime exiting\n' - f'>)\n' - f'|_{actor}\n' + '-)> actor runtime main task exit.\n' + # f'{op_nested_actor_repr}' ) - log.info(teardown_report) + # if _state._runtime_vars['_is_root']: + # log.info(teardown_report) + # else: + log.runtime(teardown_report) -# TODO: rename to `Registry` and move to `._discovery`! +# TODO: rename to `Registry` and move to `.discovery._registry`! 
class Arbiter(Actor): ''' - A special registrar actor who can contact all other actors - within its immediate process tree and possibly keeps a registry - of others meant to be discoverable in a distributed - application. Normally the registrar is also the "root actor" - and thus always has access to the top-most-level actor - (process) nursery. + A special registrar (and for now..) `Actor` who can contact all + other actors within its immediate process tree and possibly keeps + a registry of others meant to be discoverable in a distributed + application. Normally the registrar is also the "root actor" and + thus always has access to the top-most-level actor (process) + nursery. By default, the registrar is always initialized when and if no other registrar socket addrs have been specified to runtime @@ -2029,6 +1867,12 @@ class Arbiter(Actor): ''' is_arbiter = True + # TODO, implement this as a read on there existing a `._state` of + # some sort setup by whenever we impl this all as + # a `.discovery._registry.open_registry()` API + def is_registry(self) -> bool: + return self.is_arbiter + def __init__( self, *args, @@ -2037,7 +1881,7 @@ class Arbiter(Actor): self._registry: dict[ tuple[str, str], - tuple[str, int], + UnwrappedAddress, ] = {} self._waiters: dict[ str, @@ -2053,18 +1897,18 @@ class Arbiter(Actor): self, name: str, - ) -> tuple[str, int]|None: + ) -> UnwrappedAddress|None: - for uid, sockaddr in self._registry.items(): + for uid, addr in self._registry.items(): if name in uid: - return sockaddr + return addr return None async def get_registry( self - ) -> dict[str, tuple[str, int]]: + ) -> dict[str, UnwrappedAddress]: ''' Return current name registry. @@ -2084,7 +1928,7 @@ class Arbiter(Actor): self, name: str, - ) -> list[tuple[str, int]]: + ) -> list[UnwrappedAddress]: ''' Wait for a particular actor to register. @@ -2092,44 +1936,41 @@ class Arbiter(Actor): registered. 
''' - sockaddrs: list[tuple[str, int]] = [] - sockaddr: tuple[str, int] + addrs: list[UnwrappedAddress] = [] + addr: UnwrappedAddress mailbox_info: str = 'Actor registry contact infos:\n' - for uid, sockaddr in self._registry.items(): + for uid, addr in self._registry.items(): mailbox_info += ( f'|_uid: {uid}\n' - f'|_sockaddr: {sockaddr}\n\n' + f'|_addr: {addr}\n\n' ) if name == uid[0]: - sockaddrs.append(sockaddr) + addrs.append(addr) - if not sockaddrs: + if not addrs: waiter = trio.Event() self._waiters.setdefault(name, []).append(waiter) await waiter.wait() for uid in self._waiters[name]: if not isinstance(uid, trio.Event): - sockaddrs.append(self._registry[uid]) + addrs.append(self._registry[uid]) log.runtime(mailbox_info) - return sockaddrs + return addrs async def register_actor( self, uid: tuple[str, str], - sockaddr: tuple[str, int] - + addr: UnwrappedAddress ) -> None: uid = name, hash = (str(uid[0]), str(uid[1])) - addr = (host, port) = ( - str(sockaddr[0]), - int(sockaddr[1]), - ) - if port == 0: - await _debug.pause() - assert port # should never be 0-dynamic-os-alloc + waddr: Address = wrap_address(addr) + if not waddr.is_valid: + # should never be 0-dynamic-os-alloc + await debug.pause() + self._registry[uid] = addr # pop and signal all waiter events diff --git a/tractor/_spawn.py b/tractor/_spawn.py index 3159508d..8d3c2cf6 100644 --- a/tractor/_spawn.py +++ b/tractor/_spawn.py @@ -34,9 +34,9 @@ from typing import ( import trio from trio import TaskStatus -from .devx._debug import ( - maybe_wait_for_debugger, - acquire_debug_lock, +from .devx import ( + debug, + pformat as _pformat ) from tractor._state import ( current_actor, @@ -46,19 +46,26 @@ from tractor._state import ( _runtime_vars, ) from tractor.log import get_logger +from tractor._addr import UnwrappedAddress from tractor._portal import Portal from tractor._runtime import Actor from tractor._entry import _mp_main from tractor._exceptions import ActorFailure -from tractor.msg.types import ( - SpawnSpec, +from tractor.msg import ( + types as msgtypes, + pretty_struct, ) if TYPE_CHECKING: + from ipc import ( + _server, + Channel, + ) from ._supervise import ActorNursery ProcessType = TypeVar('ProcessType', mp.Process, trio.Process) + log = get_logger('tractor') # placeholder for an mp start context if so using that backend @@ -163,7 +170,7 @@ async def exhaust_portal( # TODO: merge with above? log.warning( 'Cancelled portal result waiter task:\n' - f'uid: {portal.channel.uid}\n' + f'uid: {portal.channel.aid}\n' f'error: {err}\n' ) return err @@ -171,7 +178,7 @@ async def exhaust_portal( else: log.debug( f'Returning final result from portal:\n' - f'uid: {portal.channel.uid}\n' + f'uid: {portal.channel.aid}\n' f'result: {final}\n' ) return final @@ -229,10 +236,6 @@ async def hard_kill( # whilst also hacking on it XD # terminate_after: int = 99999, - # NOTE: for mucking with `.pause()`-ing inside the runtime - # whilst also hacking on it XD - # terminate_after: int = 99999, - ) -> None: ''' Un-gracefully terminate an OS level `trio.Process` after timeout. @@ -294,6 +297,23 @@ async def hard_kill( # zombies (as a feature) we ask the OS to do send in the # removal swad as the last resort. if cs.cancelled_caught: + + # TODO? attempt at intermediary-rent-sub + # with child in debug lock? + # |_https://github.com/goodboy/tractor/issues/320 + # + # if not is_root_process(): + # log.warning( + # 'Attempting to acquire debug-REPL-lock before zombie reap!' 
+ # ) + # with trio.CancelScope(shield=True): + # async with debug.acquire_debug_lock( + # subactor_uid=current_actor().uid, + # ) as _ctx: + # log.warning( + # 'Acquired debug lock, child ready to be killed ??\n' + # ) + # TODO: toss in the skynet-logo face as ascii art? log.critical( # 'Well, the #ZOMBIE_LORD_IS_HERE# to collect\n' @@ -324,20 +344,21 @@ async def soft_kill( see `.hard_kill()`). ''' - uid: tuple[str, str] = portal.channel.uid + chan: Channel = portal.channel + peer_aid: msgtypes.Aid = chan.aid try: log.cancel( f'Soft killing sub-actor via portal request\n' f'\n' - f'(c=> {portal.chan.uid}\n' - f' |_{proc}\n' + f'c)=> {peer_aid.reprol()}@[{chan.maddr}]\n' + f' |_{proc}\n' ) # wait on sub-proc to signal termination await wait_func(proc) except trio.Cancelled: with trio.CancelScope(shield=True): - await maybe_wait_for_debugger( + await debug.maybe_wait_for_debugger( child_in_debug=_runtime_vars.get( '_debug_mode', False ), @@ -378,7 +399,7 @@ async def soft_kill( if proc.poll() is None: # type: ignore log.warning( 'Subactor still alive after cancel request?\n\n' - f'uid: {uid}\n' + f'uid: {peer_aid}\n' f'|_{proc}\n' ) n.cancel_scope.cancel() @@ -392,14 +413,15 @@ async def new_proc( errors: dict[tuple[str, str], Exception], # passed through to actor main - bind_addrs: list[tuple[str, int]], - parent_addr: tuple[str, int], + bind_addrs: list[UnwrappedAddress], + parent_addr: UnwrappedAddress, _runtime_vars: dict[str, Any], # serialized and sent to _child *, infect_asyncio: bool = False, - task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED + task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED, + proc_kwargs: dict[str, any] = {} ) -> None: @@ -419,6 +441,7 @@ async def new_proc( _runtime_vars, # run time vars infect_asyncio=infect_asyncio, task_status=task_status, + proc_kwargs=proc_kwargs ) @@ -429,12 +452,13 @@ async def trio_proc( errors: dict[tuple[str, str], Exception], # passed through to actor main - bind_addrs: list[tuple[str, int]], - parent_addr: tuple[str, int], + bind_addrs: list[UnwrappedAddress], + parent_addr: UnwrappedAddress, _runtime_vars: dict[str, Any], # serialized and sent to _child *, infect_asyncio: bool = False, - task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED + task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED, + proc_kwargs: dict[str, any] = {} ) -> None: ''' @@ -456,6 +480,9 @@ async def trio_proc( # the OS; it otherwise can be passed via the parent channel if # we prefer in the future (for privacy). "--uid", + # TODO, how to pass this over "wire" encodings like + # cmdline args? + # -[ ] maybe we can add an `msgtypes.Aid.min_tuple()` ? 
str(subactor.uid), # Address the child must connect to on startup "--parent_addr", @@ -473,18 +500,20 @@ async def trio_proc( cancelled_during_spawn: bool = False proc: trio.Process|None = None + ipc_server: _server.Server = actor_nursery._actor.ipc_server try: try: - proc: trio.Process = await trio.lowlevel.open_process(spawn_cmd) + proc: trio.Process = await trio.lowlevel.open_process(spawn_cmd, **proc_kwargs) log.runtime( - 'Started new child\n' - f'|_{proc}\n' + f'Started new child subproc\n' + f'(>\n' + f' |_{proc}\n' ) # wait for actor to spawn and connect back to us # channel should have handshake completed by the # local actor by the time we get a ref to it - event, chan = await actor_nursery._actor.wait_for_peer( + event, chan = await ipc_server.wait_for_peer( subactor.uid ) @@ -496,10 +525,10 @@ async def trio_proc( with trio.CancelScope(shield=True): # don't clobber an ongoing pdb if is_root_process(): - await maybe_wait_for_debugger() + await debug.maybe_wait_for_debugger() elif proc is not None: - async with acquire_debug_lock(subactor.uid): + async with debug.acquire_debug_lock(subactor.uid): # soft wait on the proc to terminate with trio.move_on_after(0.5): await proc.wait() @@ -517,15 +546,20 @@ async def trio_proc( # send a "spawning specification" which configures the # initial runtime state of the child. - await chan.send( - SpawnSpec( - _parent_main_data=subactor._parent_main_data, - enable_modules=subactor.enable_modules, - reg_addrs=subactor.reg_addrs, - bind_addrs=bind_addrs, - _runtime_vars=_runtime_vars, - ) + sspec = msgtypes.SpawnSpec( + _parent_main_data=subactor._parent_main_data, + enable_modules=subactor.enable_modules, + reg_addrs=subactor.reg_addrs, + bind_addrs=bind_addrs, + _runtime_vars=_runtime_vars, ) + log.runtime( + f'Sending spawn spec to child\n' + f'{{}}=> {chan.aid.reprol()!r}\n' + f'\n' + f'{pretty_struct.pformat(sspec)}\n' + ) + await chan.send(sspec) # track subactor in current nursery curr_actor: Actor = current_actor() @@ -552,7 +586,7 @@ async def trio_proc( # condition. await soft_kill( proc, - trio.Process.wait, + trio.Process.wait, # XXX, uses `pidfd_open()` below. portal ) @@ -560,8 +594,7 @@ async def trio_proc( # tandem if not done already log.cancel( 'Cancelling portal result reaper task\n' - f'>c)\n' - f' |_{subactor.uid}\n' + f'c)> {subactor.aid.reprol()!r}\n' ) nursery.cancel_scope.cancel() @@ -570,21 +603,24 @@ async def trio_proc( # allowed! Do this **after** cancellation/teardown to avoid # killing the process too early. if proc: + reap_repr: str = _pformat.nest_from_op( + input_op='>x)', + text=subactor.pformat(), + ) log.cancel( f'Hard reap sequence starting for subactor\n' - f'>x)\n' - f' |_{subactor}@{subactor.uid}\n' + f'{reap_repr}' ) with trio.CancelScope(shield=True): # don't clobber an ongoing pdb if cancelled_during_spawn: # Try again to avoid TTY clobbering. - async with acquire_debug_lock(subactor.uid): + async with debug.acquire_debug_lock(subactor.uid): with trio.move_on_after(0.5): await proc.wait() - await maybe_wait_for_debugger( + await debug.maybe_wait_for_debugger( child_in_debug=_runtime_vars.get( '_debug_mode', False ), @@ -613,7 +649,7 @@ async def trio_proc( # acquire the lock and get notified of who has it, # check that uid against our known children? 
# this_uid: tuple[str, str] = current_actor().uid - # await acquire_debug_lock(this_uid) + # await debug.acquire_debug_lock(this_uid) if proc.poll() is None: log.cancel(f"Attempting to hard kill {proc}") @@ -635,12 +671,13 @@ async def mp_proc( subactor: Actor, errors: dict[tuple[str, str], Exception], # passed through to actor main - bind_addrs: list[tuple[str, int]], - parent_addr: tuple[str, int], + bind_addrs: list[UnwrappedAddress], + parent_addr: UnwrappedAddress, _runtime_vars: dict[str, Any], # serialized and sent to _child *, infect_asyncio: bool = False, - task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED + task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED, + proc_kwargs: dict[str, any] = {} ) -> None: @@ -715,12 +752,14 @@ async def mp_proc( log.runtime(f"Started {proc}") + ipc_server: _server.Server = actor_nursery._actor.ipc_server try: # wait for actor to spawn and connect back to us # channel should have handshake completed by the # local actor by the time we get a ref to it - event, chan = await actor_nursery._actor.wait_for_peer( - subactor.uid) + event, chan = await ipc_server.wait_for_peer( + subactor.uid, + ) # XXX: monkey patch poll API to match the ``subprocess`` API.. # not sure why they don't expose this but kk. diff --git a/tractor/_state.py b/tractor/_state.py index 79c8bdea..2a47e548 100644 --- a/tractor/_state.py +++ b/tractor/_state.py @@ -14,16 +14,19 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . -""" -Per process state +''' +Per actor-process runtime state mgmt APIs. -""" +''' from __future__ import annotations from contextvars import ( ContextVar, ) +import os +from pathlib import Path from typing import ( Any, + Literal, TYPE_CHECKING, ) @@ -34,20 +37,39 @@ if TYPE_CHECKING: from ._context import Context +# default IPC transport protocol settings +TransportProtocolKey = Literal[ + 'tcp', + 'uds', +] +_def_tpt_proto: TransportProtocolKey = 'tcp' + _current_actor: Actor|None = None # type: ignore # noqa _last_actor_terminated: Actor|None = None # TODO: mk this a `msgspec.Struct`! +# -[ ] type out all fields obvi! +# -[ ] (eventually) mk wire-ready for monitoring? _runtime_vars: dict[str, Any] = { - '_debug_mode': False, - '_is_root': False, - '_root_mailbox': (None, None), + # root of actor-process tree info + '_is_root': False, # bool + '_root_mailbox': (None, None), # tuple[str|None, str|None] + '_root_addrs': [], # tuple[str|None, str|None] + + # parent->chld ipc protocol caps + '_enable_tpts': [_def_tpt_proto], + + # registrar info '_registry_addrs': [], - '_is_infected_aio': False, - + # `debug_mode: bool` settings + '_debug_mode': False, # bool + 'repl_fixture': False, # |AbstractContextManager[bool] # for `tractor.pause_from_sync()` & `breakpoint()` support 'use_greenback': False, + + # infected-`asyncio`-mode: `trio` running as guest. + '_is_infected_aio': False, } @@ -99,7 +121,7 @@ def current_actor( return _current_actor -def is_main_process() -> bool: +def is_root_process() -> bool: ''' Bool determining if this actor is running in the top-most process. @@ -108,8 +130,10 @@ def is_main_process() -> bool: return mp.current_process().name == 'MainProcess' -# TODO, more verby name? -def debug_mode() -> bool: +is_main_process = is_root_process + + +def is_debug_mode() -> bool: ''' Bool determining if "debug mode" is on which enables remote subactor pdb entry on crashes. 
@@ -118,6 +142,9 @@ def debug_mode() -> bool: return bool(_runtime_vars['_debug_mode']) +debug_mode = is_debug_mode + + def is_root_process() -> bool: return _runtime_vars['_is_root'] @@ -143,3 +170,34 @@ def current_ipc_ctx( f'|_{current_task()}\n' ) return ctx + + +# std ODE (mutable) app state location +_rtdir: Path = Path(os.environ['XDG_RUNTIME_DIR']) + + +def get_rt_dir( + subdir: str = 'tractor' +) -> Path: + ''' + Return the user "runtime dir" where most userspace apps stick + their IPC and cache related system util-files; we take hold + of a `'XDG_RUNTIME_DIR'/tractor/` subdir by default. + + ''' + rtdir: Path = _rtdir / subdir + if not rtdir.is_dir(): + rtdir.mkdir() + return rtdir + + +def current_ipc_protos() -> list[str]: + ''' + Return the list of IPC transport protocol keys currently + in use by this actor. + + The keys are as declared by `MsgTransport` and `Address` + concrete-backend sub-types defined throughout `tractor.ipc`. + + ''' + return _runtime_vars['_enable_tpts'] diff --git a/tractor/_streaming.py b/tractor/_streaming.py index 2ff2d41c..4683f35d 100644 --- a/tractor/_streaming.py +++ b/tractor/_streaming.py @@ -56,7 +56,7 @@ from tractor.msg import ( if TYPE_CHECKING: from ._runtime import Actor from ._context import Context - from ._ipc import Channel + from .ipc import Channel log = get_logger(__name__) @@ -426,8 +426,8 @@ class MsgStream(trio.abc.Channel): self._closed = re # if caught_eoc: - # # from .devx import _debug - # # await _debug.pause() + # # from .devx import debug + # # await debug.pause() # with trio.CancelScope(shield=True): # await rx_chan.aclose() @@ -437,22 +437,23 @@ class MsgStream(trio.abc.Channel): message: str = ( f'Stream self-closed by {this_side!r}-side before EoC from {peer_side!r}\n' # } bc a stream is a "scope"/msging-phase inside an IPC - f'x}}>\n' + f'c}}>\n' f' |_{self}\n' ) - log.cancel(message) - self._eoc = trio.EndOfChannel(message) - if ( (rx_chan := self._rx_chan) and (stats := rx_chan.statistics()).tasks_waiting_receive ): - log.cancel( - f'Msg-stream is closing but there is still reader tasks,\n' + message += ( + f'AND there is still reader tasks,\n' + f'\n' f'{stats}\n' ) + log.cancel(message) + self._eoc = trio.EndOfChannel(message) + # ?XXX WAIT, why do we not close the local mem chan `._rx_chan` XXX? # => NO, DEFINITELY NOT! <= # if we're a bi-dir `MsgStream` BECAUSE this same @@ -595,8 +596,17 @@ class MsgStream(trio.abc.Channel): trio.ClosedResourceError, trio.BrokenResourceError, BrokenPipeError, - ) as trans_err: - if hide_tb: + ) as _trans_err: + trans_err = _trans_err + if ( + hide_tb + and + self._ctx.chan._exc is trans_err + # ^XXX, IOW, only if the channel is marked errored + # for the same reason as whatever its underlying + # transport raised, do we keep the full low-level tb + # suppressed from the user. + ): raise type(trans_err)( *trans_err.args ) from trans_err @@ -802,13 +812,12 @@ async def open_stream_from_ctx( # sanity, can remove? assert eoc is stream._eoc - log.warning( + log.runtime( 'Stream was terminated by EoC\n\n' # NOTE: won't show the error but # does show txt followed by IPC msg. 
f'{str(eoc)}\n' ) - finally: if ctx._portal: try: diff --git a/tractor/_supervise.py b/tractor/_supervise.py index bc6bc983..be89c4cb 100644 --- a/tractor/_supervise.py +++ b/tractor/_supervise.py @@ -21,34 +21,49 @@ from contextlib import asynccontextmanager as acm from functools import partial import inspect -from pprint import pformat -from typing import TYPE_CHECKING +from typing import ( + TYPE_CHECKING, +) import typing import warnings import trio -from .devx._debug import maybe_wait_for_debugger + +from .devx import ( + debug, + pformat as _pformat, +) +from ._addr import ( + UnwrappedAddress, + mk_uuid, +) from ._state import current_actor, is_main_process from .log import get_logger, get_loglevel from ._runtime import Actor from ._portal import Portal -from ._exceptions import ( +from .trionics import ( is_multi_cancelled, + collapse_eg, +) +from ._exceptions import ( ContextCancelled, ) -from ._root import open_root_actor +from ._root import ( + open_root_actor, +) from . import _state from . import _spawn if TYPE_CHECKING: import multiprocessing as mp + # from .ipc._server import IPCServer + from .ipc import IPCServer + log = get_logger(__name__) -_default_bind_addr: tuple[str, int] = ('127.0.0.1', 0) - class ActorNursery: ''' @@ -102,7 +117,6 @@ class ActorNursery: ] ] = {} - self.cancelled: bool = False self._join_procs = trio.Event() self._at_least_one_child_in_debug: bool = False self.errors = errors @@ -120,18 +134,62 @@ class ActorNursery: # TODO: remove the `.run_in_actor()` API and thus this 2ndary # nursery when that API get's moved outside this primitive! self._ria_nursery = ria_nursery + + # TODO, factor this into a .hilevel api! + # # portals spawned with ``run_in_actor()`` are # cancelled when their "main" result arrives self._cancel_after_result_on_exit: set = set() + # trio.Nursery-like cancel (request) statuses + self._cancelled_caught: bool = False + self._cancel_called: bool = False + + @property + def cancel_called(self) -> bool: + ''' + Records whether cancellation has been requested for this + actor-nursery by a call to `.cancel()` either due to, + - an explicit call by some actor-local-task, + - an implicit call due to an error/cancel emited inside + the `tractor.open_nursery()` block. + + ''' + return self._cancel_called + + @property + def cancelled_caught(self) -> bool: + ''' + Set when this nursery was able to cance all spawned subactors + gracefully via an (implicit) call to `.cancel()`. + + ''' + return self._cancelled_caught + + # TODO! remove internal/test-suite usage! + @property + def cancelled(self) -> bool: + warnings.warn( + "`ActorNursery.cancelled` is now deprecated, use " + " `.cancel_called` instead.", + DeprecationWarning, + stacklevel=2, + ) + return ( + self._cancel_called + # and + # self._cancelled_caught + ) + async def start_actor( self, name: str, *, - bind_addrs: list[tuple[str, int]] = [_default_bind_addr], + bind_addrs: list[UnwrappedAddress]|None = None, rpc_module_paths: list[str]|None = None, + enable_transports: list[str] = [_state._def_tpt_proto], enable_modules: list[str]|None = None, loglevel: str|None = None, # set log level per subactor debug_mode: bool|None = None, @@ -141,6 +199,7 @@ class ActorNursery: # a `._ria_nursery` since the dependent APIs have been # removed! 
nursery: trio.Nursery|None = None, + proc_kwargs: dict[str, any] = {} ) -> Portal: ''' @@ -177,15 +236,17 @@ class ActorNursery: enable_modules.extend(rpc_module_paths) subactor = Actor( - name, + name=name, + uuid=mk_uuid(), + # modules allowed to invoked funcs from enable_modules=enable_modules, loglevel=loglevel, # verbatim relay this actor's registrar addresses - registry_addrs=current_actor().reg_addrs, + registry_addrs=current_actor().registry_addrs, ) - parent_addr = self._actor.accept_addr + parent_addr: UnwrappedAddress = self._actor.accept_addr assert parent_addr # start a task to spawn a process @@ -204,6 +265,7 @@ class ActorNursery: parent_addr, _rtv, # run time vars infect_asyncio=infect_asyncio, + proc_kwargs=proc_kwargs ) ) @@ -222,11 +284,12 @@ class ActorNursery: *, name: str | None = None, - bind_addrs: tuple[str, int] = [_default_bind_addr], + bind_addrs: UnwrappedAddress|None = None, rpc_module_paths: list[str] | None = None, enable_modules: list[str] | None = None, loglevel: str | None = None, # set log level per subactor infect_asyncio: bool = False, + proc_kwargs: dict[str, any] = {}, **kwargs, # explicit args to ``fn`` @@ -257,6 +320,7 @@ class ActorNursery: # use the run_in_actor nursery nursery=self._ria_nursery, infect_asyncio=infect_asyncio, + proc_kwargs=proc_kwargs ) # XXX: don't allow stream funcs @@ -294,15 +358,21 @@ class ActorNursery: ''' __runtimeframe__: int = 1 # noqa - self.cancelled = True + self._cancel_called = True # TODO: impl a repr for spawn more compact # then `._children`.. children: dict = self._children child_count: int = len(children) msg: str = f'Cancelling actor nursery with {child_count} children\n' + + server: IPCServer = self._actor.ipc_server + with trio.move_on_after(3) as cs: - async with trio.open_nursery() as tn: + async with ( + collapse_eg(), + trio.open_nursery() as tn, + ): subactor: Actor proc: trio.Process @@ -321,7 +391,7 @@ class ActorNursery: else: if portal is None: # actor hasn't fully spawned yet - event = self._actor._peer_connected[subactor.uid] + event: trio.Event = server._peer_connected[subactor.uid] log.warning( f"{subactor.uid} never 't finished spawning?" ) @@ -337,7 +407,7 @@ class ActorNursery: if portal is None: # cancelled while waiting on the event # to arrive - chan = self._actor._peers[subactor.uid][-1] + chan = server._peers[subactor.uid][-1] if chan: portal = Portal(chan) else: # there's no other choice left @@ -366,6 +436,8 @@ class ActorNursery: ) in children.values(): log.warning(f"Hard killing process {proc}") proc.terminate() + else: + self._cancelled_caught # mark ourselves as having (tried to have) cancelled all subactors self._join_procs.set() @@ -395,10 +467,10 @@ async def _open_and_supervise_one_cancels_all_nursery( # `ActorNursery.start_actor()`). # errors from this daemon actor nursery bubble up to caller - async with trio.open_nursery( - strict_exception_groups=False, - # ^XXX^ TODO? instead unpack any RAE as per "loose" style? - ) as da_nursery: + async with ( + collapse_eg(), + trio.open_nursery() as da_nursery, + ): try: # This is the inner level "run in actor" nursery. It is # awaited first since actors spawned in this way (using @@ -408,11 +480,10 @@ async def _open_and_supervise_one_cancels_all_nursery( # immediately raised for handling by a supervisor strategy. # As such if the strategy propagates any error(s) upwards # the above "daemon actor" nursery will be notified. - async with trio.open_nursery( - strict_exception_groups=False, - # ^XXX^ TODO? 
instead unpack any RAE as per "loose" style? - ) as ria_nursery: - + async with ( + collapse_eg(), + trio.open_nursery() as ria_nursery, + ): an = ActorNursery( actor, ria_nursery, @@ -429,7 +500,7 @@ async def _open_and_supervise_one_cancels_all_nursery( # the "hard join phase". log.runtime( 'Waiting on subactors to complete:\n' - f'{pformat(an._children)}\n' + f'>}} {len(an._children)}\n' ) an._join_procs.set() @@ -443,7 +514,7 @@ async def _open_and_supervise_one_cancels_all_nursery( # will make the pdb repl unusable. # Instead try to wait for pdb to be released before # tearing down. - await maybe_wait_for_debugger( + await debug.maybe_wait_for_debugger( child_in_debug=an._at_least_one_child_in_debug ) @@ -519,7 +590,7 @@ async def _open_and_supervise_one_cancels_all_nursery( # XXX: yet another guard before allowing the cancel # sequence in case a (single) child is in debug. - await maybe_wait_for_debugger( + await debug.maybe_wait_for_debugger( child_in_debug=an._at_least_one_child_in_debug ) @@ -568,9 +639,15 @@ async def _open_and_supervise_one_cancels_all_nursery( # final exit +_shutdown_msg: str = ( + 'Actor-runtime-shutdown' +) + + @acm # @api_frame async def open_nursery( + *, # named params only! hide_tb: bool = True, **kwargs, # ^TODO, paramspec for `open_root_actor()` @@ -655,17 +732,26 @@ async def open_nursery( ): __tracebackhide__: bool = False - msg: str = ( - 'Actor-nursery exited\n' - f'|_{an}\n' + + op_nested_an_repr: str = _pformat.nest_from_op( + input_op=')>', + text=f'{an}', + # nest_prefix='|_', + nest_indent=1, # under > ) + an_msg: str = ( + f'Actor-nursery exited\n' + f'{op_nested_an_repr}\n' + ) + # keep noise low during std operation. + log.runtime(an_msg) if implicit_runtime: # shutdown runtime if it was started and report noisly # that we're did so. - msg += '=> Shutting down actor runtime <=\n' + msg: str = ( + '\n' + '\n' + f'{_shutdown_msg} )>\n' + ) log.info(msg) - - else: - # keep noise low during std operation. - log.runtime(msg) diff --git a/tractor/_testing/__init__.py b/tractor/_testing/__init__.py index 88860d13..8b906d11 100644 --- a/tractor/_testing/__init__.py +++ b/tractor/_testing/__init__.py @@ -26,7 +26,7 @@ import os import pathlib import tractor -from tractor.devx._debug import ( +from tractor.devx.debug import ( BoxedMaybeException, ) from .pytest import ( @@ -37,6 +37,9 @@ from .fault_simulation import ( ) +# TODO, use dulwhich for this instead? +# -> we're going to likely need it (or something similar) +# for supporting hot-coad reload feats eventually anyway! def repodir() -> pathlib.Path: ''' Return the abspath to the repo directory. diff --git a/tractor/_testing/addr.py b/tractor/_testing/addr.py new file mode 100644 index 00000000..1b066336 --- /dev/null +++ b/tractor/_testing/addr.py @@ -0,0 +1,70 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +''' +Random IPC addr generation for isolating +the discovery space between test sessions. + +Might be eventually useful to expose as a util set from +our `tractor.discovery` subsys? + +''' +import random +from typing import ( + Type, +) +from tractor import ( + _addr, +) + + +def get_rando_addr( + tpt_proto: str, + *, + + # choose random port at import time + _rando_port: str = random.randint(1000, 9999) + +) -> tuple[str, str|int]: + ''' + Used to globally override the runtime to the + per-test-session-dynamic addr so that all tests never conflict + with any other actor tree using the default. + + ''' + addr_type: Type[_addr.Addres] = _addr._address_types[tpt_proto] + def_reg_addr: tuple[str, int] = _addr._default_lo_addrs[tpt_proto] + + # this is the "unwrapped" form expected to be passed to + # `.open_root_actor()` by test body. + testrun_reg_addr: tuple[str, int|str] + match tpt_proto: + case 'tcp': + testrun_reg_addr = ( + addr_type.def_bindspace, + _rando_port, + ) + + # NOTE, file-name uniqueness (no-collisions) will be based on + # the runtime-directory and root (pytest-proc's) pid. + case 'uds': + testrun_reg_addr = addr_type.get_random().unwrap() + + # XXX, as sanity it should never the same as the default for the + # host-singleton registry actor. + assert def_reg_addr != testrun_reg_addr + + return testrun_reg_addr diff --git a/tractor/_testing/pytest.py b/tractor/_testing/pytest.py index 93eeaf72..1a2f63ab 100644 --- a/tractor/_testing/pytest.py +++ b/tractor/_testing/pytest.py @@ -26,29 +26,46 @@ from functools import ( import inspect import platform +import pytest import tractor import trio def tractor_test(fn): ''' - Decorator for async test funcs to present them as "native" - looking sync funcs runnable by `pytest` using `trio.run()`. + Decorator for async test fns to decorator-wrap them as "native" + looking sync funcs runnable by `pytest` and auto invoked with + `trio.run()` (much like the `pytest-trio` plugin's approach). - Use: + Further the test fn body will be invoked AFTER booting the actor + runtime, i.e. from inside a `tractor.open_root_actor()` block AND + with various runtime and tooling parameters implicitly passed as + requested by by the test session's config; see immediately below. - @tractor_test - async def test_whatever(): - await ... + Basic deco use: + --------------- - If fixtures: + @tractor_test + async def test_whatever(): + await ... - - ``reg_addr`` (a socket addr tuple where arbiter is listening) - - ``loglevel`` (logging level passed to tractor internals) - - ``start_method`` (subprocess spawning backend) - are defined in the `pytest` fixture space they will be automatically - injected to tests declaring these funcargs. + Runtime config via special fixtures: + ------------------------------------ + If any of the following fixture are requested by the wrapped test + fn (via normal func-args declaration), + + - `reg_addr` (a socket addr tuple where arbiter is listening) + - `loglevel` (logging level passed to tractor internals) + - `start_method` (subprocess spawning backend) + + (TODO support) + - `tpt_proto` (IPC transport protocol key) + + they will be automatically injected to each test as normally + expected as well as passed to the initial + `tractor.open_root_actor()` funcargs. 
+ ''' @wraps(fn) def wrapper( @@ -111,3 +128,164 @@ def tractor_test(fn): return trio.run(main) return wrapper + + +def pytest_addoption( + parser: pytest.Parser, +): + # parser.addoption( + # "--ll", + # action="store", + # dest='loglevel', + # default='ERROR', help="logging level to set when testing" + # ) + + parser.addoption( + "--spawn-backend", + action="store", + dest='spawn_backend', + default='trio', + help="Processing spawning backend to use for test run", + ) + + parser.addoption( + "--tpdb", + "--debug-mode", + action="store_true", + dest='tractor_debug_mode', + # default=False, + help=( + 'Enable a flag that can be used by tests to to set the ' + '`debug_mode: bool` for engaging the internal ' + 'multi-proc debugger sys.' + ), + ) + + # provide which IPC transport protocols opting-in test suites + # should accumulatively run against. + parser.addoption( + "--tpt-proto", + nargs='+', # accumulate-multiple-args + action="store", + dest='tpt_protos', + default=['tcp'], + help="Transport protocol to use under the `tractor.ipc.Channel`", + ) + + +def pytest_configure(config): + backend = config.option.spawn_backend + tractor._spawn.try_set_start_method(backend) + + +@pytest.fixture(scope='session') +def debug_mode(request) -> bool: + ''' + Flag state for whether `--tpdb` (for `tractor`-py-debugger) + was passed to the test run. + + Normally tests should pass this directly to `.open_root_actor()` + to allow the user to opt into suite-wide crash handling. + + ''' + debug_mode: bool = request.config.option.tractor_debug_mode + return debug_mode + + +@pytest.fixture(scope='session') +def spawn_backend(request) -> str: + return request.config.option.spawn_backend + + +@pytest.fixture(scope='session') +def tpt_protos(request) -> list[str]: + + # allow quoting on CLI + proto_keys: list[str] = [ + proto_key.replace('"', '').replace("'", "") + for proto_key in request.config.option.tpt_protos + ] + + # ?TODO, eventually support multiple protos per test-sesh? + if len(proto_keys) > 1: + pytest.fail( + 'We only support one `--tpt-proto ` atm!\n' + ) + + # XXX ensure we support the protocol by name via lookup! + for proto_key in proto_keys: + addr_type = tractor._addr._address_types[proto_key] + assert addr_type.proto_key == proto_key + + yield proto_keys + + +@pytest.fixture( + scope='session', + autouse=True, +) +def tpt_proto( + tpt_protos: list[str], +) -> str: + proto_key: str = tpt_protos[0] + + from tractor import _state + if _state._def_tpt_proto != proto_key: + _state._def_tpt_proto = proto_key + + yield proto_key + + +@pytest.fixture(scope='session') +def reg_addr( + tpt_proto: str, +) -> tuple[str, int|str]: + ''' + Deliver a test-sesh unique registry address such + that each run's (tests which use this fixture) will + have no conflicts/cross-talk when running simultaneously + nor will interfere with other live `tractor` apps active + on the same network-host (namespace). + + ''' + from tractor._testing.addr import get_rando_addr + return get_rando_addr( + tpt_proto=tpt_proto, + ) + + +def pytest_generate_tests( + metafunc: pytest.Metafunc, +): + spawn_backend: str = metafunc.config.option.spawn_backend + + if not spawn_backend: + # XXX some weird windows bug with `pytest`? + spawn_backend = 'trio' + + # TODO: maybe just use the literal `._spawn.SpawnMethodKey`? 
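A downstream test body would consume the session-scoped fixtures above roughly as in the following sketch; the test fn and the `registry_addrs` keyword name are assumptions for illustration only:

    import trio
    import tractor

    def test_root_actor_boots(
        reg_addr: tuple[str, int|str],  # per-session random registry addr
        debug_mode: bool,               # True when `--tpdb` was passed
    ):
        async def main():
            async with tractor.open_root_actor(
                registry_addrs=[reg_addr],  # kwarg name assumed
                debug_mode=debug_mode,
            ):
                # root actor is up on the isolated registry addr
                assert tractor.current_actor()

        trio.run(main)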
+ assert spawn_backend in ( + 'mp_spawn', + 'mp_forkserver', + 'trio', + ) + + # NOTE: used-to-be-used-to dyanmically parametrize tests for when + # you just passed --spawn-backend=`mp` on the cli, but now we expect + # that cli input to be manually specified, BUT, maybe we'll do + # something like this again in the future? + if 'start_method' in metafunc.fixturenames: + metafunc.parametrize( + "start_method", + [spawn_backend], + scope='module', + ) + + # TODO, parametrize any `tpt_proto: str` declaring tests! + # proto_tpts: list[str] = metafunc.config.option.proto_tpts + # if 'tpt_proto' in metafunc.fixturenames: + # metafunc.parametrize( + # 'tpt_proto', + # proto_tpts, # TODO, double check this list usage! + # scope='module', + # ) diff --git a/tractor/_testing/samples.py b/tractor/_testing/samples.py new file mode 100644 index 00000000..a87a22c4 --- /dev/null +++ b/tractor/_testing/samples.py @@ -0,0 +1,35 @@ +import os +import random + + +def generate_sample_messages( + amount: int, + rand_min: int = 0, + rand_max: int = 0, + silent: bool = False +) -> tuple[list[bytes], int]: + + msgs = [] + size = 0 + + if not silent: + print(f'\ngenerating {amount} messages...') + + for i in range(amount): + msg = f'[{i:08}]'.encode('utf-8') + + if rand_max > 0: + msg += os.urandom( + random.randint(rand_min, rand_max)) + + size += len(msg) + + msgs.append(msg) + + if not silent and i and i % 10_000 == 0: + print(f'{i} generated') + + if not silent: + print(f'done, {size:,} bytes in total') + + return msgs, size diff --git a/tractor/devx/__init__.py b/tractor/devx/__init__.py index 7047dbdb..80c6744f 100644 --- a/tractor/devx/__init__.py +++ b/tractor/devx/__init__.py @@ -20,7 +20,7 @@ Runtime "developer experience" utils and addons to aid our and working with/on the actor runtime. """ -from ._debug import ( +from .debug import ( maybe_wait_for_debugger as maybe_wait_for_debugger, acquire_debug_lock as acquire_debug_lock, breakpoint as breakpoint, diff --git a/tractor/devx/_debug.py b/tractor/devx/_debug.py deleted file mode 100644 index c6ca1d89..00000000 --- a/tractor/devx/_debug.py +++ /dev/null @@ -1,3303 +0,0 @@ -# tractor: structured concurrent "actors". -# Copyright 2018-eternity Tyler Goodlet. - -# This program is free software: you can redistribute it and/or -# modify it under the terms of the GNU Affero General Public License -# as published by the Free Software Foundation, either version 3 of -# the License, or (at your option) any later version. - -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Affero General Public License for more details. - -# You should have received a copy of the GNU Affero General Public -# License along with this program. If not, see -# . - -""" -Multi-core debugging for da peeps! 
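For reference, the `generate_sample_messages()` helper added in `tractor/_testing/samples.py` above can be driven like so (parameter values are arbitrary, for illustration):

    from tractor._testing.samples import generate_sample_messages

    # 10k framed msgs, each padded with 64-256 random bytes
    msgs, total_size = generate_sample_messages(
        amount=10_000,
        rand_min=64,
        rand_max=256,
        silent=True,
    )
    assert total_size == sum(len(m) for m in msgs)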
- -""" -from __future__ import annotations -import asyncio -import bdb -from contextlib import ( - asynccontextmanager as acm, - contextmanager as cm, - nullcontext, - _GeneratorContextManager, - _AsyncGeneratorContextManager, -) -from functools import ( - partial, - cached_property, -) -import inspect -import os -import signal -import sys -import textwrap -import threading -import traceback -from typing import ( - Any, - Callable, - AsyncIterator, - AsyncGenerator, - TypeAlias, - TYPE_CHECKING, -) -from types import ( - FunctionType, - FrameType, - ModuleType, - TracebackType, - CodeType, -) - -from msgspec import Struct -import pdbp -import sniffio -import trio -from trio import CancelScope -from trio.lowlevel import ( - current_task, -) -from trio import ( - TaskStatus, -) -import tractor -from tractor.to_asyncio import run_trio_task_in_future -from tractor.log import get_logger -from tractor._context import Context -from tractor import _state -from tractor._exceptions import ( - InternalError, - NoRuntime, - is_multi_cancelled, -) -from tractor._state import ( - current_actor, - is_root_process, - debug_mode, - current_ipc_ctx, -) -# from .pformat import ( -# pformat_caller_frame, -# pformat_cs, -# ) - -if TYPE_CHECKING: - from trio.lowlevel import Task - from threading import Thread - from tractor._ipc import Channel - from tractor._runtime import ( - Actor, - ) - -log = get_logger(__name__) - -# TODO: refine the internal impl and APIs in this module! -# -# -[ ] rework `._pause()` and it's branch-cases for root vs. -# subactor: -# -[ ] `._pause_from_root()` + `_pause_from_subactor()`? -# -[ ] do the de-factor based on bg-thread usage in -# `.pause_from_sync()` & `_pause_from_bg_root_thread()`. -# -[ ] drop `debug_func == None` case which is confusing af.. -# -[ ] factor out `_enter_repl_sync()` into a util func for calling -# the `_set_trace()` / `_post_mortem()` APIs? -# -# -[ ] figure out if we need `acquire_debug_lock()` and/or re-implement -# it as part of the `.pause_from_sync()` rework per above? -# -# -[ ] pair the `._pause_from_subactor()` impl with a "debug nursery" -# that's dynamically allocated inside the `._rpc` task thus -# avoiding the `._service_n.start()` usage for the IPC request? -# -[ ] see the TODO inside `._rpc._errors_relayed_via_ipc()` -# -# -[ ] impl a `open_debug_request()` which encaps all -# `request_root_stdio_lock()` task scheduling deats -# + `DebugStatus` state mgmt; which should prolly be re-branded as -# a `DebugRequest` type anyway AND with suppoort for bg-thread -# (from root actor) usage? -# -# -[ ] handle the `xonsh` case for bg-root-threads in the SIGINT -# handler! -# -[ ] do we need to do the same for subactors? -# -[ ] make the failing tests finally pass XD -# -# -[ ] simplify `maybe_wait_for_debugger()` to be a root-task only -# API? -# -[ ] currently it's implemented as that so might as well make it -# formal? - - -def hide_runtime_frames() -> dict[FunctionType, CodeType]: - ''' - Hide call-stack frames for various std-lib and `trio`-API primitives - such that the tracebacks presented from our runtime are as minimized - as possible, particularly from inside a `PdbREPL`. - - ''' - # XXX HACKZONE XXX - # hide exit stack frames on nurseries and cancel-scopes! - # |_ so avoid seeing it when the `pdbp` REPL is first engaged from - # inside a `trio.open_nursery()` scope (with no line after it - # in before the block end??). 
- # - # TODO: FINALLY got this workin originally with - # `@pdbp.hideframe` around the `wrapper()` def embedded inside - # `_ki_protection_decoratior()`.. which is in the module: - # /home/goodboy/.virtualenvs/tractor311/lib/python3.11/site-packages/trio/_core/_ki.py - # - # -[ ] make an issue and patch for `trio` core? maybe linked - # to the long outstanding `pdb` one below? - # |_ it's funny that there's frame hiding throughout `._run.py` - # but not where it matters on the below exit funcs.. - # - # -[ ] provide a patchset for the lonstanding - # |_ https://github.com/python-trio/trio/issues/1155 - # - # -[ ] make a linked issue to ^ and propose allowing all the - # `._core._run` code to have their `__tracebackhide__` value - # configurable by a `RunVar` to allow getting scheduler frames - # if desired through configuration? - # - # -[ ] maybe dig into the core `pdb` issue why the extra frame is shown - # at all? - # - funcs: list[FunctionType] = [ - trio._core._run.NurseryManager.__aexit__, - trio._core._run.CancelScope.__exit__, - _GeneratorContextManager.__exit__, - _AsyncGeneratorContextManager.__aexit__, - _AsyncGeneratorContextManager.__aenter__, - trio.Event.wait, - ] - func_list_str: str = textwrap.indent( - "\n".join(f.__qualname__ for f in funcs), - prefix=' |_ ', - ) - log.devx( - 'Hiding the following runtime frames by default:\n' - f'{func_list_str}\n' - ) - - codes: dict[FunctionType, CodeType] = {} - for ref in funcs: - # stash a pre-modified version of each ref's code-obj - # so it can be reverted later if needed. - codes[ref] = ref.__code__ - pdbp.hideframe(ref) - # - # pdbp.hideframe(trio._core._run.NurseryManager.__aexit__) - # pdbp.hideframe(trio._core._run.CancelScope.__exit__) - # pdbp.hideframe(_GeneratorContextManager.__exit__) - # pdbp.hideframe(_AsyncGeneratorContextManager.__aexit__) - # pdbp.hideframe(_AsyncGeneratorContextManager.__aenter__) - # pdbp.hideframe(trio.Event.wait) - return codes - - -class LockStatus( - Struct, - tag=True, - tag_field='msg_type', -): - subactor_uid: tuple[str, str] - cid: str - locked: bool - - -class LockRelease( - Struct, - tag=True, - tag_field='msg_type', -): - subactor_uid: tuple[str, str] - cid: str - - -__pld_spec__: TypeAlias = LockStatus|LockRelease - - -# TODO: instantiate this only in root from factory -# so as to allow runtime errors from subactors. -class Lock: - ''' - Actor-tree-global debug lock state, exists only in a root process. - - Mostly to avoid a lot of global declarations for now XD. - - ''' - @staticmethod - def get_locking_task_cs() -> CancelScope|None: - if not is_root_process(): - raise RuntimeError( - '`Lock.locking_task_cs` is invalid in subactors!' - ) - - if ctx := Lock.ctx_in_debug: - return ctx._scope - - return None - - # TODO: once we convert to singleton-per-actor-style - # @property - # def stats(cls) -> trio.LockStatistics: - # return cls._debug_lock.statistics() - - # @property - # def owner(cls) -> Task: - # return cls._debug_lock.statistics().owner - - # ROOT ONLY - # ------ - ------- - # the root-actor-ONLY singletons for, - # - # - the uid of the actor who's task is using a REPL - # - a literal task-lock, - # - a shielded-cancel-scope around the acquiring task*, - # - a broadcast event to signal no-actor using a REPL in tree, - # - a filter list to block subs-by-uid from locking. - # - # * in case it needs to be manually cancelled in root due to - # a stale lock condition (eg. 
IPC failure with the locking - # child - ctx_in_debug: Context|None = None - req_handler_finished: trio.Event|None = None - - _owned_by_root: bool = False - _debug_lock: trio.StrictFIFOLock = trio.StrictFIFOLock() - _blocked: set[ - tuple[str, str] # `Actor.uid` for per actor - |str # Context.cid for per task - ] = set() - - @classmethod - def repr(cls) -> str: - lock_stats: trio.LockStatistics = cls._debug_lock.statistics() - req: trio.Event|None = cls.req_handler_finished - fields: str = ( - f'|_ ._blocked: {cls._blocked}\n' - f'|_ ._debug_lock: {cls._debug_lock}\n' - f' {lock_stats}\n\n' - - f'|_ .ctx_in_debug: {cls.ctx_in_debug}\n' - f'|_ .req_handler_finished: {req}\n' - ) - if req: - req_stats: trio.EventStatistics = req.statistics() - fields += f' {req_stats}\n' - - body: str = textwrap.indent( - fields, - prefix=' ', - ) - return ( - f'<{cls.__name__}(\n' - f'{body}' - ')>\n\n' - ) - - @classmethod - # @pdbp.hideframe - def release( - cls, - raise_on_thread: bool = True, - - ) -> bool: - ''' - Release the actor-tree global TTY stdio lock (only) from the - `trio.run()`-main-thread. - - ''' - we_released: bool = False - ctx_in_debug: Context|None = cls.ctx_in_debug - repl_task: Task|Thread|None = DebugStatus.repl_task - try: - if not DebugStatus.is_main_trio_thread(): - thread: threading.Thread = threading.current_thread() - message: str = ( - '`Lock.release()` can not be called from a non-main-`trio` thread!\n' - f'{thread}\n' - ) - if raise_on_thread: - raise RuntimeError(message) - - log.devx(message) - return False - - task: Task = current_task() - message: str = ( - 'TTY NOT RELEASED on behalf of caller\n' - f'|_{task}\n' - ) - - # sanity check that if we're the root actor - # the lock is marked as such. - # note the pre-release value may be diff the the - # post-release task. - if repl_task is task: - assert cls._owned_by_root - message: str = ( - 'TTY lock held by root-actor on behalf of local task\n' - f'|_{repl_task}\n' - ) - else: - assert DebugStatus.repl_task is not task - - lock: trio.StrictFIFOLock = cls._debug_lock - owner: Task = lock.statistics().owner - if ( - lock.locked() - and - (owner is task) - # ^-NOTE-^ if we do NOT ensure this, `trio` will - # raise a RTE when a non-owner tries to releasee the - # lock. - # - # Further we need to be extra pedantic about the - # correct task, greenback-spawned-task and/or thread - # being set to the `.repl_task` such that the above - # condition matches and we actually release the lock. - # - # This is particular of note from `.pause_from_sync()`! - ): - cls._debug_lock.release() - we_released: bool = True - if repl_task: - message: str = ( - 'TTY released on behalf of root-actor-local REPL owner\n' - f'|_{repl_task}\n' - ) - else: - message: str = ( - 'TTY released by us on behalf of remote peer?\n' - f'{ctx_in_debug}\n' - ) - - except RuntimeError as rte: - log.exception( - 'Failed to release `Lock._debug_lock: trio.FIFOLock`?\n' - ) - raise rte - - finally: - # IFF there are no more requesting tasks queued up fire, the - # "tty-unlocked" event thereby alerting any monitors of the lock that - # we are now back in the "tty unlocked" state. This is basically - # and edge triggered signal around an empty queue of sub-actor - # tasks that may have tried to acquire the lock. 
- lock_stats: trio.LockStatistics = cls._debug_lock.statistics() - req_handler_finished: trio.Event|None = Lock.req_handler_finished - if ( - not lock_stats.owner - and - req_handler_finished is None - ): - message += ( - '-> No new task holds the TTY lock!\n\n' - f'{Lock.repr()}\n' - ) - - elif ( - req_handler_finished # new IPC ctx debug request active - and - lock.locked() # someone has the lock - ): - behalf_of_task = ( - ctx_in_debug - or - repl_task - ) - message += ( - f'A non-caller task still owns this lock on behalf of\n' - f'{behalf_of_task}\n' - f'lock owner task: {lock_stats.owner}\n' - ) - - if ( - we_released - and - ctx_in_debug - ): - cls.ctx_in_debug = None # unset - - # post-release value (should be diff then value above!) - repl_task: Task|Thread|None = DebugStatus.repl_task - if ( - cls._owned_by_root - and - we_released - ): - cls._owned_by_root = False - - if task is not repl_task: - message += ( - 'Lock released by root actor on behalf of bg thread\n' - f'|_{repl_task}\n' - ) - - if message: - log.devx(message) - - return we_released - - @classmethod - @acm - async def acquire_for_ctx( - cls, - ctx: Context, - - ) -> AsyncIterator[trio.StrictFIFOLock]: - ''' - Acquire a root-actor local FIFO lock which tracks mutex access of - the process tree's global debugger breakpoint. - - This lock avoids tty clobbering (by preventing multiple processes - reading from stdstreams) and ensures multi-actor, sequential access - to the ``pdb`` repl. - - ''' - if not is_root_process(): - raise RuntimeError('Only callable by a root actor task!') - - # subactor_uid: tuple[str, str] = ctx.chan.uid - we_acquired: bool = False - log.runtime( - f'Attempting to acquire TTY lock for sub-actor\n' - f'{ctx}' - ) - try: - pre_msg: str = ( - f'Entering lock checkpoint for sub-actor\n' - f'{ctx}' - ) - stats = cls._debug_lock.statistics() - if owner := stats.owner: - pre_msg += ( - f'\n' - f'`Lock` already held by local task?\n' - f'{owner}\n\n' - # f'On behalf of task: {cls.remote_task_in_debug!r}\n' - f'On behalf of IPC ctx\n' - f'{ctx}' - ) - log.runtime(pre_msg) - - # NOTE: if the surrounding cancel scope from the - # `lock_stdio_for_peer()` caller is cancelled, this line should - # unblock and NOT leave us in some kind of - # a "child-locked-TTY-but-child-is-uncontactable-over-IPC" - # condition. - await cls._debug_lock.acquire() - cls.ctx_in_debug = ctx - we_acquired = True - - log.runtime( - f'TTY lock acquired for sub-actor\n' - f'{ctx}' - ) - - # NOTE: critical section: this yield is unshielded! - # - # IF we received a cancel during the shielded lock entry of some - # next-in-queue requesting task, then the resumption here will - # result in that ``trio.Cancelled`` being raised to our caller - # (likely from `lock_stdio_for_peer()` below)! In - # this case the ``finally:`` below should trigger and the - # surrounding caller side context should cancel normally - # relaying back to the caller. 
- - yield cls._debug_lock - - finally: - message :str = 'Exiting `Lock.acquire_for_ctx()` on behalf of sub-actor\n' - if we_acquired: - cls.release() - message += '-> TTY lock released by child\n' - - else: - message += '-> TTY lock never acquired by child??\n' - - log.runtime( - f'{message}\n' - f'{ctx}' - ) - - -def get_lock() -> Lock: - return Lock - - -@tractor.context( - # enable the locking msgspec - pld_spec=__pld_spec__, -) -async def lock_stdio_for_peer( - ctx: Context, - subactor_task_uid: tuple[str, int], - -) -> LockStatus|LockRelease: - ''' - Lock the TTY in the root process of an actor tree in a new - inter-actor-context-task such that the ``pdbp`` debugger console - can be mutex-allocated to the calling sub-actor for REPL control - without interference by other processes / threads. - - NOTE: this task must be invoked in the root process of the actor - tree. It is meant to be invoked as an rpc-task and should be - highly reliable at releasing the mutex complete! - - ''' - subactor_uid: tuple[str, str] = ctx.chan.uid - - # mark the tty lock as being in use so that the runtime - # can try to avoid clobbering any connection from a child - # that's currently relying on it. - we_finished = Lock.req_handler_finished = trio.Event() - lock_blocked: bool = False - try: - if ctx.cid in Lock._blocked: - raise RuntimeError( - f'Double lock request!?\n' - f'The same remote task already has an active request for TTY lock ??\n\n' - f'subactor uid: {subactor_uid}\n\n' - - 'This might be mean that the requesting task ' - 'in `request_root_stdio_lock()` may have crashed?\n' - 'Consider that an internal bug exists given the TTY ' - '`Lock`ing IPC dialog..\n' - ) - Lock._blocked.add(ctx.cid) - lock_blocked = True - root_task_name: str = current_task().name - if tuple(subactor_uid) in Lock._blocked: - log.warning( - f'Subactor is blocked from acquiring debug lock..\n' - f'subactor_uid: {subactor_uid}\n' - f'remote task: {subactor_task_uid}\n' - ) - ctx._enter_debugger_on_cancel: bool = False - message: str = ( - f'Debug lock blocked for subactor\n\n' - f'x)<= {subactor_uid}\n\n' - - f'Likely because the root actor already started shutdown and is ' - 'closing IPC connections for this child!\n\n' - 'Cancelling debug request!\n' - ) - log.cancel(message) - await ctx.cancel() - raise DebugRequestError(message) - - log.devx( - 'Subactor attempting to acquire TTY lock\n' - f'root task: {root_task_name}\n' - f'subactor_uid: {subactor_uid}\n' - f'remote task: {subactor_task_uid}\n' - ) - DebugStatus.shield_sigint() - - # NOTE: we use the IPC ctx's cancel scope directly in order to - # ensure that on any transport failure, or cancellation request - # from the child we expect - # `Context._maybe_cancel_and_set_remote_error()` to cancel this - # scope despite the shielding we apply below. 
- debug_lock_cs: CancelScope = ctx._scope - - async with Lock.acquire_for_ctx(ctx=ctx): - debug_lock_cs.shield = True - - log.devx( - 'Subactor acquired debugger request lock!\n' - f'root task: {root_task_name}\n' - f'subactor_uid: {subactor_uid}\n' - f'remote task: {subactor_task_uid}\n\n' - - 'Sending `ctx.started(LockStatus)`..\n' - - ) - - # indicate to child that we've locked stdio - await ctx.started( - LockStatus( - subactor_uid=subactor_uid, - cid=ctx.cid, - locked=True, - ) - ) - - log.devx( - f'Actor {subactor_uid} acquired `Lock` via debugger request' - ) - - # wait for unlock pdb by child - async with ctx.open_stream() as stream: - release_msg: LockRelease = await stream.receive() - - # TODO: security around only releasing if - # these match? - log.devx( - f'TTY lock released requested\n\n' - f'{release_msg}\n' - ) - assert release_msg.cid == ctx.cid - assert release_msg.subactor_uid == tuple(subactor_uid) - - log.devx( - f'Actor {subactor_uid} released TTY lock' - ) - - return LockStatus( - subactor_uid=subactor_uid, - cid=ctx.cid, - locked=False, - ) - - except BaseException as req_err: - fail_reason: str = ( - f'on behalf of peer\n\n' - f'x)<=\n' - f' |_{subactor_task_uid!r}@{ctx.chan.uid!r}\n' - f'\n' - 'Forcing `Lock.release()` due to acquire failure!\n\n' - f'x)=>\n' - f' {ctx}' - ) - if isinstance(req_err, trio.Cancelled): - fail_reason = ( - 'Cancelled during stdio-mutex request ' - + - fail_reason - ) - else: - fail_reason = ( - 'Failed to deliver stdio-mutex request ' - + - fail_reason - ) - - log.exception(fail_reason) - Lock.release() - raise - - finally: - if lock_blocked: - Lock._blocked.remove(ctx.cid) - - # wakeup any waiters since the lock was (presumably) - # released, possibly only temporarily. - we_finished.set() - DebugStatus.unshield_sigint() - - -class DebugStateError(InternalError): - ''' - Something inconsistent or unexpected happend with a sub-actor's - debug mutex request to the root actor. - - ''' - - -# TODO: rename to ReplState or somethin? -# DebugRequest, make it a singleton instance? -class DebugStatus: - ''' - Singleton-state for debugging machinery in a subactor. - - Composes conc primitives for syncing with a root actor to - acquire the tree-global (TTY) `Lock` such that only ever one - actor's task can have the REPL active at a given time. - - Methods to shield the process' `SIGINT` handler are used - whenever a local task is an active REPL. - - ''' - # XXX local ref to the `pdbp.Pbp` instance, ONLY set in the - # actor-process that currently has activated a REPL i.e. it - # should be `None` (unset) in any other actor-process that does - # not yet have the `Lock` acquired via a root-actor debugger - # request. - repl: PdbREPL|None = None - - # TODO: yet again this looks like a task outcome where we need - # to sync to the completion of one task (and get its result) - # being used everywhere for syncing.. - # -[ ] see if we can get our proto oco task-mngr to work for - # this? - repl_task: Task|None = None - # repl_thread: Thread|None = None - # ^TODO? 
- - repl_release: trio.Event|None = None - - req_task: Task|None = None - req_ctx: Context|None = None - req_cs: CancelScope|None = None - req_finished: trio.Event|None = None - req_err: BaseException|None = None - - lock_status: LockStatus|None = None - - _orig_sigint_handler: Callable|None = None - _trio_handler: ( - Callable[[int, FrameType|None], Any] - |int - | None - ) = None - - @classmethod - def repr(cls) -> str: - fields: str = ( - f'repl: {cls.repl}\n' - f'repl_task: {cls.repl_task}\n' - f'repl_release: {cls.repl_release}\n' - f'req_ctx: {cls.req_ctx}\n' - ) - body: str = textwrap.indent( - fields, - prefix=' |_', - ) - return ( - f'<{cls.__name__}(\n' - f'{body}' - ')>' - ) - - # TODO: how do you get this to work on a non-inited class? - # __repr__ = classmethod(repr) - # __str__ = classmethod(repr) - - @classmethod - def shield_sigint(cls): - ''' - Shield out SIGINT handling (which by default triggers - `Task` cancellation) in subactors when a `pdb` REPL - is active. - - Avoids cancellation of the current actor (task) when the user - mistakenly sends ctl-c or via a recevied signal (from an - external request). Explicit runtime cancel requests are - allowed until the current REPL-session (the blocking call - `Pdb.interaction()`) exits, normally via the 'continue' or - 'quit' command - at which point the orig SIGINT handler is - restored via `.unshield_sigint()` below. - - Impl notes: - ----------- - - we prefer that `trio`'s default handler is always used when - SIGINT is unshielded (hence disabling the `pdb.Pdb` - defaults in `mk_pdb()`) such that reliable KBI cancellation - is always enforced. - - - we always detect whether we're running from a non-main - thread, in which case schedule the SIGINT shielding override - to in the main thread as per, - - https://docs.python.org/3/library/signal.html#signals-and-threads - - ''' - # - # XXX detect whether we're running from a non-main thread - # in which case schedule the SIGINT shielding override - # to in the main thread. - # https://docs.python.org/3/library/signal.html#signals-and-threads - if ( - not cls.is_main_trio_thread() - and - not _state._runtime_vars.get( - '_is_infected_aio', - False, - ) - ): - cls._orig_sigint_handler: Callable = trio.from_thread.run_sync( - signal.signal, - signal.SIGINT, - sigint_shield, - ) - - else: - cls._orig_sigint_handler = signal.signal( - signal.SIGINT, - sigint_shield, - ) - - @classmethod - @pdbp.hideframe # XXX NOTE XXX see below in `.pause_from_sync()` - def unshield_sigint(cls): - ''' - Un-shield SIGINT for REPL-active (su)bactor. - - See details in `.shield_sigint()`. - - ''' - # always restore ``trio``'s sigint handler. see notes below in - # the pdb factory about the nightmare that is that code swapping - # out the handler when the repl activates... - # if not cls.is_main_trio_thread(): - if ( - not cls.is_main_trio_thread() - and - not _state._runtime_vars.get( - '_is_infected_aio', - False, - ) - # not current_actor().is_infected_aio() - # ^XXX, since for bg-thr case will always raise.. 
- ): - trio.from_thread.run_sync( - signal.signal, - signal.SIGINT, - cls._trio_handler, - ) - else: - trio_h: Callable = cls._trio_handler - # XXX should never really happen XXX - if not trio_h: - mk_pdb().set_trace() - - signal.signal( - signal.SIGINT, - cls._trio_handler, - ) - - cls._orig_sigint_handler = None - - @classmethod - def is_main_trio_thread(cls) -> bool: - ''' - Check if we're the "main" thread (as in the first one - started by cpython) AND that it is ALSO the thread that - called `trio.run()` and not some thread spawned with - `trio.to_thread.run_sync()`. - - ''' - try: - async_lib: str = sniffio.current_async_library() - except sniffio.AsyncLibraryNotFoundError: - async_lib = None - - is_main_thread: bool = trio._util.is_main_thread() - # ^TODO, since this is private, @oremanj says - # we should just copy the impl for now..? - if is_main_thread: - thread_name: str = 'main' - else: - thread_name: str = threading.current_thread().name - - is_trio_main = ( - is_main_thread - and - (async_lib == 'trio') - ) - - report: str = f'Running thread: {thread_name!r}\n' - if async_lib: - report += ( - f'Current async-lib detected by `sniffio`: {async_lib}\n' - ) - else: - report += ( - 'No async-lib detected (by `sniffio`) ??\n' - ) - if not is_trio_main: - log.warning(report) - - return is_trio_main - # XXX apparently unreliable..see ^ - # ( - # threading.current_thread() - # is not threading.main_thread() - # ) - - @classmethod - def cancel(cls) -> bool: - if (req_cs := cls.req_cs): - req_cs.cancel() - return True - - return False - - @classmethod - # @pdbp.hideframe - def release( - cls, - cancel_req_task: bool = False, - ): - repl_release: trio.Event = cls.repl_release - try: - # sometimes the task might already be terminated in - # which case this call will raise an RTE? - # See below for reporting on that.. - if ( - repl_release is not None - and - not repl_release.is_set() - ): - if cls.is_main_trio_thread(): - repl_release.set() - - elif ( - _state._runtime_vars.get( - '_is_infected_aio', - False, - ) - # ^XXX, again bc we need to not except - # but for bg-thread case it will always raise.. - # - # TODO, is there a better api then using - # `err_on_no_runtime=False` in the below? - # current_actor().is_infected_aio() - ): - async def _set_repl_release(): - repl_release.set() - - fute: asyncio.Future = run_trio_task_in_future( - _set_repl_release - ) - if not fute.done(): - log.warning('REPL release state unknown..?') - - else: - # XXX NOTE ONLY used for bg root-actor sync - # threads, see `.pause_from_sync()`. - trio.from_thread.run_sync( - repl_release.set - ) - - except RuntimeError as rte: - log.exception( - f'Failed to release debug-request ??\n\n' - f'{cls.repr()}\n' - ) - # pdbp.set_trace() - raise rte - - finally: - # if req_ctx := cls.req_ctx: - # req_ctx._scope.cancel() - if cancel_req_task: - cancelled: bool = cls.cancel() - if not cancelled: - log.warning( - 'Failed to cancel request task!?\n' - f'{cls.repl_task}\n' - ) - - # actor-local state, irrelevant for non-root. - cls.repl_task = None - - # XXX WARNING needs very special caughtion, and we should - # prolly make a more explicit `@property` API? - # - # - if unset in root multi-threaded case can cause - # issues with detecting that some root thread is - # using a REPL, - # - # - what benefit is there to unsetting, it's always - # set again for the next task in some actor.. - # only thing would be to avoid in the sigint-handler - # logging when we don't need to? 
- cls.repl = None - - # maybe restore original sigint handler - # XXX requires runtime check to avoid crash! - if current_actor(err_on_no_runtime=False): - cls.unshield_sigint() - - -# TODO: use the new `@lowlevel.singleton` for this! -def get_debug_req() -> DebugStatus|None: - return DebugStatus - - -class TractorConfig(pdbp.DefaultConfig): - ''' - Custom `pdbp` config which tries to use the best tradeoff - between pretty and minimal. - - ''' - use_pygments: bool = True - sticky_by_default: bool = False - enable_hidden_frames: bool = True - - # much thanks @mdmintz for the hot tip! - # fixes line spacing issue when resizing terminal B) - truncate_long_lines: bool = False - - # ------ - ------ - # our own custom config vars mostly - # for syncing with the actor tree's singleton - # TTY `Lock`. - - -class PdbREPL(pdbp.Pdb): - ''' - Add teardown hooks and local state describing any - ongoing TTY `Lock` request dialog. - - ''' - # override the pdbp config with our coolio one - # NOTE: this is only loaded when no `~/.pdbrc` exists - # so we should prolly pass it into the .__init__() instead? - # i dunno, see the `DefaultFactory` and `pdb.Pdb` impls. - DefaultConfig = TractorConfig - - status = DebugStatus - - # NOTE: see details in stdlib's `bdb.py` - # def user_exception(self, frame, exc_info): - # ''' - # Called when we stop on an exception. - # ''' - # log.warning( - # 'Exception during REPL sesh\n\n' - # f'{frame}\n\n' - # f'{exc_info}\n\n' - # ) - - # NOTE: this actually hooks but i don't see anyway to detect - # if an error was caught.. this is why currently we just always - # call `DebugStatus.release` inside `_post_mortem()`. - # def preloop(self): - # print('IN PRELOOP') - # super().preloop() - - # TODO: cleaner re-wrapping of all this? - # -[ ] figure out how to disallow recursive .set_trace() entry - # since that'll cause deadlock for us. - # -[ ] maybe a `@cm` to call `super().()`? - # -[ ] look at hooking into the `pp` hook specially with our - # own set of pretty-printers? - # * `.pretty_struct.Struct.pformat()` - # * `.pformat(MsgType.pld)` - # * `.pformat(Error.tb_str)`? - # * .. maybe more? - # - def set_continue(self): - try: - super().set_continue() - finally: - # NOTE: for subactors the stdio lock is released via the - # allocated RPC locker task, so for root we have to do it - # manually. - if ( - is_root_process() - and - Lock._debug_lock.locked() - and - DebugStatus.is_main_trio_thread() - ): - # Lock.release(raise_on_thread=False) - Lock.release() - - # XXX AFTER `Lock.release()` for root local repl usage - DebugStatus.release() - - def set_quit(self): - try: - super().set_quit() - finally: - if ( - is_root_process() - and - Lock._debug_lock.locked() - and - DebugStatus.is_main_trio_thread() - ): - # Lock.release(raise_on_thread=False) - Lock.release() - - # XXX after `Lock.release()` for root local repl usage - DebugStatus.release() - - # XXX NOTE: we only override this because apparently the stdlib pdb - # bois likes to touch the SIGINT handler as much as i like to touch - # my d$%&. - def _cmdloop(self): - self.cmdloop() - - @cached_property - def shname(self) -> str | None: - ''' - Attempt to return the login shell name with a special check for - the infamous `xonsh` since it seems to have some issues much - different from std shells when it comes to flushing the prompt? - - ''' - # SUPER HACKY and only really works if `xonsh` is not used - # before spawning further sub-shells.. 
- shpath = os.getenv('SHELL', None) - - if shpath: - if ( - os.getenv('XONSH_LOGIN', default=False) - or 'xonsh' in shpath - ): - return 'xonsh' - - return os.path.basename(shpath) - - return None - - -async def request_root_stdio_lock( - actor_uid: tuple[str, str], - task_uid: tuple[str, int], - - shield: bool = False, - task_status: TaskStatus[CancelScope] = trio.TASK_STATUS_IGNORED, -): - ''' - Connect to the root actor for this actor's process tree and - RPC-invoke a task which acquires the std-streams global `Lock`: - a process-tree-global mutex which prevents multiple actors from - entering `PdbREPL.interaction()` at the same time such that the - parent TTY's stdio is never "clobbered" by simultaneous - reads/writes. - - The actual `Lock` singleton instance exists ONLY in the root - actor's memory space and does nothing more then manage - process-tree global state, - namely a `._debug_lock: trio.FIFOLock`. - - The actual `PdbREPL` interaction/operation is completely isolated - to each sub-actor (process) with the root's `Lock` providing the - multi-process mutex-syncing mechanism to avoid parallel REPL - usage within an actor tree. - - ''' - log.devx( - 'Initing stdio-lock request task with root actor' - ) - # TODO: can we implement this mutex more generally as - # a `._sync.Lock`? - # -[ ] simply add the wrapping needed for the debugger specifics? - # - the `__pld_spec__` impl and maybe better APIs for the client - # vs. server side state tracking? (`Lock` + `DebugStatus`) - # -[ ] for eg. `mp` has a multi-proc lock via the manager - # - https://docs.python.org/3.8/library/multiprocessing.html#synchronization-primitives - # -[ ] technically we need a `RLock` since re-acquire should be a noop - # - https://docs.python.org/3.8/library/multiprocessing.html#multiprocessing.RLock - DebugStatus.req_finished = trio.Event() - DebugStatus.req_task = current_task() - req_err: BaseException|None = None - try: - from tractor._discovery import get_root - # NOTE: we need this to ensure that this task exits - # BEFORE the REPl instance raises an error like - # `bdb.BdbQuit` directly, OW you get a trio cs stack - # corruption! - # Further, the since this task is spawned inside the - # `Context._scope_nursery: trio.Nursery`, once an RPC - # task errors that cs is cancel_called and so if we want - # to debug the TPC task that failed we need to shield - # against that expected `.cancel()` call and instead - # expect all of the `PdbREPL`.set_[continue/quit/]()` - # methods to unblock this task by setting the - # `.repl_release: # trio.Event`. - with trio.CancelScope(shield=shield) as req_cs: - # XXX: was orig for debugging cs stack corruption.. - # log.devx( - # 'Request cancel-scope is:\n\n' - # f'{pformat_cs(req_cs, var_name="req_cs")}\n\n' - # ) - DebugStatus.req_cs = req_cs - req_ctx: Context|None = None - ctx_eg: BaseExceptionGroup|None = None - try: - # TODO: merge into single async with ? - async with get_root() as portal: - async with portal.open_context( - lock_stdio_for_peer, - subactor_task_uid=task_uid, - - # NOTE: set it here in the locker request task bc it's - # possible for multiple such requests for the lock in any - # single sub-actor AND there will be a race between when the - # root locking task delivers the `Started(pld=LockStatus)` - # and when the REPL is actually entered by the requesting - # application task who called - # `.pause()`/`.post_mortem()`. 
- # - # SO, applying the pld-spec here means it is only applied to - # this IPC-ctx request task, NOT any other task(s) - # including the one that actually enters the REPL. This - # is oc desired bc ow the debugged task will msg-type-error. - # pld_spec=__pld_spec__, - - ) as (req_ctx, status): - - DebugStatus.req_ctx = req_ctx - log.devx( - 'Subactor locked TTY with msg\n\n' - f'{status}\n' - ) - - # try: - if (locker := status.subactor_uid) != actor_uid: - raise DebugStateError( - f'Root actor locked by another peer !?\n' - f'locker: {locker!r}\n' - f'actor_uid: {actor_uid}\n' - ) - assert status.cid - # except AttributeError: - # log.exception('failed pldspec asserts!') - # mk_pdb().set_trace() - # raise - - # set last rxed lock dialog status. - DebugStatus.lock_status = status - - async with req_ctx.open_stream() as stream: - task_status.started(req_ctx) - - # wait for local task to exit - # `PdbREPL.interaction()`, normally via - # a `DebugStatus.release()`call, and - # then unblock us here. - await DebugStatus.repl_release.wait() - await stream.send( - LockRelease( - subactor_uid=actor_uid, - cid=status.cid, - ) - ) - - # sync with child-side root locker task - # completion - status: LockStatus = await req_ctx.result() - assert not status.locked - DebugStatus.lock_status = status - - log.devx( - 'TTY lock was released for subactor with msg\n\n' - f'{status}\n\n' - f'Exitting {req_ctx.side!r}-side of locking req_ctx\n' - ) - - except* ( - tractor.ContextCancelled, - trio.Cancelled, - ) as _taskc_eg: - ctx_eg = _taskc_eg - log.cancel( - 'Debug lock request was CANCELLED?\n\n' - f'<=c) {req_ctx}\n' - # f'{pformat_cs(req_cs, var_name="req_cs")}\n\n' - # f'{pformat_cs(req_ctx._scope, var_name="req_ctx._scope")}\n\n' - ) - raise - - except* ( - BaseException, - ) as _ctx_eg: - ctx_eg = _ctx_eg - message: str = ( - 'Failed during debug request dialog with root actor?\n' - ) - if (req_ctx := DebugStatus.req_ctx): - message += ( - f'<=x)\n' - f' |_{req_ctx}\n' - f'Cancelling IPC ctx!\n' - ) - try: - await req_ctx.cancel() - except trio.ClosedResourceError as terr: - ctx_eg.add_note( - # f'Failed with {type(terr)!r} x)> `req_ctx.cancel()` ' - f'Failed with `req_ctx.cancel()` PdbREPL: - ''' - Deliver a new `PdbREPL`: a multi-process safe `pdbp.Pdb`-variant - using the magic of `tractor`'s SC-safe IPC. - - B) - - Our `pdb.Pdb` subtype accomplishes multi-process safe debugging - by: - - - mutexing access to the root process' std-streams (& thus parent - process TTY) via an IPC managed `Lock` singleton per - actor-process tree. - - - temporarily overriding any subactor's SIGINT handler to shield - during live REPL sessions in sub-actors such that cancellation - is never (mistakenly) triggered by a ctrl-c and instead only by - explicit runtime API requests or after the - `pdb.Pdb.interaction()` call has returned. - - FURTHER, the `pdbp.Pdb` instance is configured to be `trio` - "compatible" from a SIGINT handling perspective; we mask out - the default `pdb` handler and instead apply `trio`s default - which mostly addresses all issues described in: - - - https://github.com/python-trio/trio/issues/1155 - - The instance returned from this factory should always be - preferred over the default `pdb[p].set_trace()` whenever using - a `pdb` REPL inside a `trio` based runtime. - - ''' - pdb = PdbREPL() - - # XXX: These are the important flags mentioned in - # https://github.com/python-trio/trio/issues/1155 - # which resolve the traceback spews to console. 
- pdb.allow_kbdint = True - pdb.nosigint = True - return pdb - - -def any_connected_locker_child() -> bool: - ''' - Predicate to determine if a reported child subactor in debug - is actually connected. - - Useful to detect stale `Lock` requests after IPC failure. - - ''' - actor: Actor = current_actor() - - if not is_root_process(): - raise InternalError('This is a root-actor only API!') - - if ( - (ctx := Lock.ctx_in_debug) - and - (uid_in_debug := ctx.chan.uid) - ): - chans: list[tractor.Channel] = actor._peers.get( - tuple(uid_in_debug) - ) - if chans: - return any( - chan.connected() - for chan in chans - ) - - return False - - -_ctlc_ignore_header: str = ( - 'Ignoring SIGINT while debug REPL in use' -) - -def sigint_shield( - signum: int, - frame: 'frame', # type: ignore # noqa - *args, - -) -> None: - ''' - Specialized, debugger-aware SIGINT handler. - - In childred we always ignore/shield for SIGINT to avoid - deadlocks since cancellation should always be managed by the - supervising parent actor. The root actor-proces is always - cancelled on ctrl-c. - - ''' - __tracebackhide__: bool = True - actor: Actor = current_actor() - - def do_cancel(): - # If we haven't tried to cancel the runtime then do that instead - # of raising a KBI (which may non-gracefully destroy - # a ``trio.run()``). - if not actor._cancel_called: - actor.cancel_soon() - - # If the runtime is already cancelled it likely means the user - # hit ctrl-c again because teardown didn't fully take place in - # which case we do the "hard" raising of a local KBI. - else: - raise KeyboardInterrupt - - # only set in the actor actually running the REPL - repl: PdbREPL|None = DebugStatus.repl - - # TODO: maybe we should flatten out all these cases using - # a match/case? - # - # root actor branch that reports whether or not a child - # has locked debugger. - if is_root_process(): - # log.warning( - log.devx( - 'Handling SIGINT in root actor\n' - f'{Lock.repr()}' - f'{DebugStatus.repr()}\n' - ) - # try to see if the supposed (sub)actor in debug still - # has an active connection to *this* actor, and if not - # it's likely they aren't using the TTY lock / debugger - # and we should propagate SIGINT normally. - any_connected: bool = any_connected_locker_child() - - problem = ( - f'root {actor.uid} handling SIGINT\n' - f'any_connected: {any_connected}\n\n' - - f'{Lock.repr()}\n' - ) - - if ( - (ctx := Lock.ctx_in_debug) - and - (uid_in_debug := ctx.chan.uid) # "someone" is (ostensibly) using debug `Lock` - ): - name_in_debug: str = uid_in_debug[0] - assert not repl - # if not repl: # but it's NOT us, the root actor. - # sanity: since no repl ref is set, we def shouldn't - # be the lock owner! - assert name_in_debug != 'root' - - # IDEAL CASE: child has REPL as expected - if any_connected: # there are subactors we can contact - # XXX: only if there is an existing connection to the - # (sub-)actor in debug do we ignore SIGINT in this - # parent! Otherwise we may hang waiting for an actor - # which has already terminated to unlock. - # - # NOTE: don't emit this with `.pdb()` level in - # root without a higher level. - log.runtime( - _ctlc_ignore_header - + - f' by child ' - f'{uid_in_debug}\n' - ) - problem = None - - else: - problem += ( - '\n' - f'A `pdb` REPL is SUPPOSEDLY in use by child {uid_in_debug}\n' - f'BUT, no child actors are IPC contactable!?!?\n' - ) - - # IDEAL CASE: root has REPL as expected - else: - # root actor still has this SIGINT handler active without - # an actor using the `Lock` (a bug state) ?? 
- # => so immediately cancel any stale lock cs and revert - # the handler! - if not DebugStatus.repl: - # TODO: WHEN should we revert back to ``trio`` - # handler if this one is stale? - # -[ ] maybe after a counts work of ctl-c mashes? - # -[ ] use a state var like `stale_handler: bool`? - problem += ( - 'No subactor is using a `pdb` REPL according `Lock.ctx_in_debug`?\n' - 'BUT, the root should be using it, WHY this handler ??\n\n' - 'So either..\n' - '- some root-thread is using it but has no `.repl` set?, OR\n' - '- something else weird is going on outside the runtime!?\n' - ) - else: - # NOTE: since we emit this msg on ctl-c, we should - # also always re-print the prompt the tail block! - log.pdb( - _ctlc_ignore_header - + - f' by root actor..\n' - f'{DebugStatus.repl_task}\n' - f' |_{repl}\n' - ) - problem = None - - # XXX if one is set it means we ARE NOT operating an ideal - # case where a child subactor or us (the root) has the - # lock without any other detected problems. - if problem: - - # detect, report and maybe clear a stale lock request - # cancel scope. - lock_cs: trio.CancelScope = Lock.get_locking_task_cs() - maybe_stale_lock_cs: bool = ( - lock_cs is not None - and not lock_cs.cancel_called - ) - if maybe_stale_lock_cs: - problem += ( - '\n' - 'Stale `Lock.ctx_in_debug._scope: CancelScope` detected?\n' - f'{Lock.ctx_in_debug}\n\n' - - '-> Calling ctx._scope.cancel()!\n' - ) - lock_cs.cancel() - - # TODO: wen do we actually want/need this, see above. - # DebugStatus.unshield_sigint() - log.warning(problem) - - # child actor that has locked the debugger - elif not is_root_process(): - log.debug( - f'Subactor {actor.uid} handling SIGINT\n\n' - f'{Lock.repr()}\n' - ) - - rent_chan: Channel = actor._parent_chan - if ( - rent_chan is None - or - not rent_chan.connected() - ): - log.warning( - 'This sub-actor thinks it is debugging ' - 'but it has no connection to its parent ??\n' - f'{actor.uid}\n' - 'Allowing SIGINT propagation..' - ) - DebugStatus.unshield_sigint() - - repl_task: str|None = DebugStatus.repl_task - req_task: str|None = DebugStatus.req_task - if ( - repl_task - and - repl - ): - log.pdb( - _ctlc_ignore_header - + - f' by local task\n\n' - f'{repl_task}\n' - f' |_{repl}\n' - ) - elif req_task: - log.debug( - _ctlc_ignore_header - + - f' by local request-task and either,\n' - f'- someone else is already REPL-in and has the `Lock`, or\n' - f'- some other local task already is replin?\n\n' - f'{req_task}\n' - ) - - # TODO can we remove this now? - # -[ ] does this path ever get hit any more? - else: - msg: str = ( - 'SIGINT shield handler still active BUT, \n\n' - ) - if repl_task is None: - msg += ( - '- No local task claims to be in debug?\n' - ) - - if repl is None: - msg += ( - '- No local REPL is currently active?\n' - ) - - if req_task is None: - msg += ( - '- No debug request task is active?\n' - ) - - log.warning( - msg - + - 'Reverting handler to `trio` default!\n' - ) - DebugStatus.unshield_sigint() - - # XXX ensure that the reverted-to-handler actually is - # able to rx what should have been **this** KBI ;) - do_cancel() - - # TODO: how to handle the case of an intermediary-child actor - # that **is not** marked in debug mode? See oustanding issue: - # https://github.com/goodboy/tractor/issues/320 - # elif debug_mode(): - - # maybe redraw/print last REPL output to console since - # we want to alert the user that more input is expect since - # nothing has been done dur to ignoring sigint. 
- if ( - DebugStatus.repl # only when current actor has a REPL engaged - ): - flush_status: str = ( - 'Flushing stdout to ensure new prompt line!\n' - ) - - # XXX: yah, mega hack, but how else do we catch this madness XD - if ( - repl.shname == 'xonsh' - ): - flush_status += ( - '-> ALSO re-flushing due to `xonsh`..\n' - ) - repl.stdout.write(repl.prompt) - - # log.warning( - log.devx( - flush_status - ) - repl.stdout.flush() - - # TODO: better console UX to match the current "mode": - # -[ ] for example if in sticky mode where if there is output - # detected as written to the tty we redraw this part underneath - # and erase the past draw of this same bit above? - # repl.sticky = True - # repl._print_if_sticky() - - # also see these links for an approach from `ptk`: - # https://github.com/goodboy/tractor/issues/130#issuecomment-663752040 - # https://github.com/prompt-toolkit/python-prompt-toolkit/blob/c2c6af8a0308f9e5d7c0e28cb8a02963fe0ce07a/prompt_toolkit/patch_stdout.py - else: - log.devx( - # log.warning( - 'Not flushing stdout since not needed?\n' - f'|_{repl}\n' - ) - - # XXX only for tracing this handler - log.devx('exiting SIGINT') - - -_pause_msg: str = 'Opening a pdb REPL in paused actor' - - -class DebugRequestError(RuntimeError): - ''' - Failed to request stdio lock from root actor! - - ''' - - -_repl_fail_msg: str|None = ( - 'Failed to REPl via `_pause()` ' -) - - -async def _pause( - - debug_func: Callable|partial|None, - - # NOTE: must be passed in the `.pause_from_sync()` case! - repl: PdbREPL|None = None, - - # TODO: allow caller to pause despite task cancellation, - # exactly the same as wrapping with: - # with CancelScope(shield=True): - # await pause() - # => the REMAINING ISSUE is that the scope's .__exit__() frame - # is always show in the debugger on entry.. and there seems to - # be no way to override it?.. - # - shield: bool = False, - hide_tb: bool = True, - called_from_sync: bool = False, - called_from_bg_thread: bool = False, - task_status: TaskStatus[ - tuple[Task, PdbREPL], - trio.Event - ] = trio.TASK_STATUS_IGNORED, - **debug_func_kwargs, - -) -> tuple[Task, PdbREPL]|None: - ''' - Inner impl for `pause()` to avoid the `trio.CancelScope.__exit__()` - stack frame when not shielded (since apparently i can't figure out - how to hide it using the normal mechanisms..) - - Hopefully we won't need this in the long run. - - ''' - __tracebackhide__: bool = hide_tb - pause_err: BaseException|None = None - actor: Actor = current_actor() - try: - task: Task = current_task() - except RuntimeError as rte: - # NOTE, 2 cases we might get here: - # - # - ACTUALLY not a `trio.lowlevel.Task` nor runtime caller, - # |_ error out as normal - # - # - an infected `asycio` actor calls it from an actual - # `asyncio.Task` - # |_ in this case we DO NOT want to RTE! - __tracebackhide__: bool = False - if actor.is_infected_aio(): - log.exception( - 'Failed to get current `trio`-task?' - ) - raise RuntimeError( - 'An `asyncio` task should not be calling this!?' - ) from rte - else: - task = asyncio.current_task() - - if debug_func is not None: - debug_func = partial(debug_func) - - # XXX NOTE XXX set it here to avoid ctl-c from cancelling a debug - # request from a subactor BEFORE the REPL is entered by that - # process. - if ( - not repl - and - debug_func - ): - repl: PdbREPL = mk_pdb() - DebugStatus.shield_sigint() - - # TODO: move this into a `open_debug_request()` @acm? 
- # -[ ] prolly makes the most sense to do the request - # task spawn as part of an `@acm` api which delivers the - # `DebugRequest` instance and ensures encapsing all the - # pld-spec and debug-nursery? - # -[ ] maybe make this a `PdbREPL` method or mod func? - # -[ ] factor out better, main reason for it is common logic for - # both root and sub repl entry - def _enter_repl_sync( - debug_func: partial[None], - ) -> None: - __tracebackhide__: bool = hide_tb - debug_func_name: str = ( - debug_func.func.__name__ if debug_func else 'None' - ) - - # TODO: do we want to support using this **just** for the - # locking / common code (prolly to help address #320)? - task_status.started((task, repl)) - try: - if debug_func: - # block here one (at the appropriate frame *up*) where - # ``breakpoint()`` was awaited and begin handling stdio. - log.devx( - 'Entering sync world of the `pdb` REPL for task..\n' - f'{repl}\n' - f' |_{task}\n' - ) - - # set local task on process-global state to avoid - # recurrent entries/requests from the same - # actor-local task. - DebugStatus.repl_task = task - if repl: - DebugStatus.repl = repl - else: - log.error( - 'No REPl instance set before entering `debug_func`?\n' - f'{debug_func}\n' - ) - - # invoke the low-level REPL activation routine which itself - # should call into a `Pdb.set_trace()` of some sort. - debug_func( - repl=repl, - hide_tb=hide_tb, - **debug_func_kwargs, - ) - - # TODO: maybe invert this logic and instead - # do `assert debug_func is None` when - # `called_from_sync`? - else: - if ( - called_from_sync - and - not DebugStatus.is_main_trio_thread() - ): - assert called_from_bg_thread - assert DebugStatus.repl_task is not task - - return (task, repl) - - except trio.Cancelled: - log.exception( - 'Cancelled during invoke of internal\n\n' - f'`debug_func = {debug_func_name}`\n' - ) - # XXX NOTE: DON'T release lock yet - raise - - except BaseException: - __tracebackhide__: bool = False - log.exception( - 'Failed to invoke internal\n\n' - f'`debug_func = {debug_func_name}`\n' - ) - # NOTE: OW this is ONLY called from the - # `.set_continue/next` hooks! - DebugStatus.release(cancel_req_task=True) - - raise - - log.devx( - 'Entering `._pause()` for requesting task\n' - f'|_{task}\n' - ) - - # TODO: this should be created as part of `DebugRequest()` init - # which should instead be a one-shot-use singleton much like - # the `PdbREPL`. - repl_task: Thread|Task|None = DebugStatus.repl_task - if ( - not DebugStatus.repl_release - or - DebugStatus.repl_release.is_set() - ): - log.devx( - 'Setting new `DebugStatus.repl_release: trio.Event` for requesting task\n' - f'|_{task}\n' - ) - DebugStatus.repl_release = trio.Event() - else: - log.devx( - 'Already an existing actor-local REPL user task\n' - f'|_{repl_task}\n' - ) - - # ^-NOTE-^ this must be created BEFORE scheduling any subactor - # debug-req task since it needs to wait on it just after - # `.started()`-ing back its wrapping `.req_cs: CancelScope`. - - repl_err: BaseException|None = None - try: - if is_root_process(): - # we also wait in the root-parent for any child that - # may have the tty locked prior - # TODO: wait, what about multiple root tasks (with bg - # threads) acquiring it though? - ctx: Context|None = Lock.ctx_in_debug - repl_task: Task|None = DebugStatus.repl_task - if ( - ctx is None - and - repl_task is task - # and - # DebugStatus.repl - # ^-NOTE-^ matches for multi-threaded case as well? - ): - # re-entrant root process already has it: noop. 
- log.warning( - f'This root actor task is already within an active REPL session\n' - f'Ignoring this recurrent`tractor.pause()` entry\n\n' - f'|_{task}\n' - # TODO: use `._frame_stack` scanner to find the @api_frame - ) - with trio.CancelScope(shield=shield): - await trio.lowlevel.checkpoint() - return (repl, task) - - # elif repl_task: - # log.warning( - # f'This root actor has another task already in REPL\n' - # f'Waitin for the other task to complete..\n\n' - # f'|_{task}\n' - # # TODO: use `._frame_stack` scanner to find the @api_frame - # ) - # with trio.CancelScope(shield=shield): - # await DebugStatus.repl_release.wait() - # await trio.sleep(0.1) - - # must shield here to avoid hitting a `Cancelled` and - # a child getting stuck bc we clobbered the tty - with trio.CancelScope(shield=shield): - ctx_line = '`Lock` in this root actor task' - acq_prefix: str = 'shield-' if shield else '' - if ( - Lock._debug_lock.locked() - ): - if ctx: - ctx_line: str = ( - 'active `Lock` owned by ctx\n\n' - f'{ctx}' - ) - elif Lock._owned_by_root: - ctx_line: str = ( - 'Already owned by root-task `Lock`\n\n' - f'repl_task: {DebugStatus.repl_task}\n' - f'repl: {DebugStatus.repl}\n' - ) - else: - ctx_line: str = ( - '**STALE `Lock`** held by unknown root/remote task ' - 'with no request ctx !?!?' - ) - - log.devx( - f'attempting to {acq_prefix}acquire ' - f'{ctx_line}' - ) - await Lock._debug_lock.acquire() - Lock._owned_by_root = True - # else: - - # if ( - # not called_from_bg_thread - # and not called_from_sync - # ): - # log.devx( - # f'attempting to {acq_prefix}acquire ' - # f'{ctx_line}' - # ) - - # XXX: since we need to enter pdb synchronously below, - # and we don't want to block the thread that starts - # stepping through the application thread, we later - # must `Lock._debug_lock.release()` manually from - # some `PdbREPL` completion callback(`.set_[continue/exit]()`). - # - # So, when `._pause()` is called from a (bg/non-trio) - # thread, special provisions are needed and we need - # to do the `.acquire()`/`.release()` calls from - # a common `trio.task` (due to internal impl of - # `FIFOLock`). Thus we do not acquire here and - # instead expect `.pause_from_sync()` to take care of - # this detail depending on the caller's (threading) - # usage. - # - # NOTE that this special case is ONLY required when - # using `.pause_from_sync()` from the root actor - # since OW a subactor will instead make an IPC - # request (in the branch below) to acquire the - # `Lock`-mutex and a common root-actor RPC task will - # take care of `._debug_lock` mgmt! - - # enter REPL from root, no TTY locking IPC ctx necessary - # since we can acquire the `Lock._debug_lock` directly in - # thread. - return _enter_repl_sync(debug_func) - - # TODO: need a more robust check for the "root" actor - elif ( - not is_root_process() - and actor._parent_chan # a connected child - ): - repl_task: Task|None = DebugStatus.repl_task - req_task: Task|None = DebugStatus.req_task - if req_task: - log.warning( - f'Already an ongoing repl request?\n' - f'|_{req_task}\n\n' - - f'REPL task is\n' - f'|_{repl_task}\n\n' - - ) - # Recurrent entry case. - # this task already has the lock and is likely - # recurrently entering a `.pause()`-point either bc, - # - someone is hacking on runtime internals and put - # one inside code that get's called on the way to - # this code, - # - a legit app task uses the 'next' command while in - # a REPL sesh, and actually enters another - # `.pause()` (in a loop or something). 
- # - # XXX Any other cose is likely a bug. - if ( - repl_task - ): - if repl_task is task: - log.warning( - f'{task.name}@{actor.uid} already has TTY lock\n' - f'ignoring..' - ) - with trio.CancelScope(shield=shield): - await trio.lowlevel.checkpoint() - return - - else: - # if **this** actor is already in debug REPL we want - # to maintain actor-local-task mutex access, so block - # here waiting for the control to be released - this - # -> allows for recursive entries to `tractor.pause()` - log.warning( - f'{task}@{actor.uid} already has TTY lock\n' - f'waiting for release..' - ) - with trio.CancelScope(shield=shield): - await DebugStatus.repl_release.wait() - await trio.sleep(0.1) - - elif ( - req_task - ): - log.warning( - 'Local task already has active debug request\n' - f'|_{task}\n\n' - - 'Waiting for previous request to complete..\n' - ) - with trio.CancelScope(shield=shield): - await DebugStatus.req_finished.wait() - - # this **must** be awaited by the caller and is done using the - # root nursery so that the debugger can continue to run without - # being restricted by the scope of a new task nursery. - - # TODO: if we want to debug a trio.Cancelled triggered exception - # we have to figure out how to avoid having the service nursery - # cancel on this task start? I *think* this works below: - # ```python - # actor._service_n.cancel_scope.shield = shield - # ``` - # but not entirely sure if that's a sane way to implement it? - - # NOTE currently we spawn the lock request task inside this - # subactor's global `Actor._service_n` so that the - # lifetime of the lock-request can outlive the current - # `._pause()` scope while the user steps through their - # application code and when they finally exit the - # session, via 'continue' or 'quit' cmds, the `PdbREPL` - # will manually call `DebugStatus.release()` to release - # the lock session with the root actor. - # - # TODO: ideally we can add a tighter scope for this - # request task likely by conditionally opening a "debug - # nursery" inside `_errors_relayed_via_ipc()`, see the - # todo in tht module, but - # -[ ] it needs to be outside the normal crash handling - # `_maybe_enter_debugger()` block-call. - # -[ ] we probably only need to allocate the nursery when - # we detect the runtime is already in debug mode. - # - curr_ctx: Context = current_ipc_ctx() - # req_ctx: Context = await curr_ctx._debug_tn.start( - log.devx( - 'Starting request task\n' - f'|_{task}\n' - ) - with trio.CancelScope(shield=shield): - req_ctx: Context = await actor._service_n.start( - partial( - request_root_stdio_lock, - actor_uid=actor.uid, - task_uid=(task.name, id(task)), # task uuid (effectively) - shield=shield, - ) - ) - # XXX sanity, our locker task should be the one which - # entered a new IPC ctx with the root actor, NOT the one - # that exists around the task calling into `._pause()`. - assert ( - req_ctx - is - DebugStatus.req_ctx - is not - curr_ctx - ) - - # enter REPL - return _enter_repl_sync(debug_func) - - # TODO: prolly factor this plus the similar block from - # `_enter_repl_sync()` into a common @cm? - except BaseException as _pause_err: - pause_err: BaseException = _pause_err - _repl_fail_report: str|None = _repl_fail_msg - if isinstance(pause_err, bdb.BdbQuit): - log.devx( - 'REPL for pdb was explicitly quit!\n' - ) - _repl_fail_report = None - - # when the actor is mid-runtime cancellation the - # `Actor._service_n` might get closed before we can spawn - # the request task, so just ignore expected RTE. 
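# NOTE (editorial sketch, not part of this patch; `task_fn()` and
# `demo()` are made up names): the "expected RTE" mentioned above is
# the `RuntimeError` which `trio.Nursery.start()` raises once the
# nursery has already been closed, roughly,
#
# ```python
# import trio
#
# async def task_fn(task_status=trio.TASK_STATUS_IGNORED):
#     task_status.started()
#
# async def demo():
#     async with trio.open_nursery() as tn:
#         pass
#     await tn.start(task_fn)  # -> RuntimeError, nursery is closed
# ```
#
# which is what happens when `Actor._service_n` is torn down mid
# actor-cancellation before the lock-request task can be spawned.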
- elif ( - isinstance(pause_err, RuntimeError) - and - actor._cancel_called - ): - # service nursery won't be usable and we - # don't want to lock up the root either way since - # we're in (the midst of) cancellation. - log.warning( - 'Service nursery likely closed due to actor-runtime cancellation..\n' - 'Ignoring failed debugger lock request task spawn..\n' - ) - return - - elif isinstance(pause_err, trio.Cancelled): - _repl_fail_report += ( - 'You called `tractor.pause()` from an already cancelled scope!\n\n' - 'Consider `await tractor.pause(shield=True)` to make it work B)\n' - ) - - else: - _repl_fail_report += f'on behalf of {repl_task} ??\n' - - if _repl_fail_report: - log.exception(_repl_fail_report) - - if not actor.is_infected_aio(): - DebugStatus.release(cancel_req_task=True) - - # sanity checks for ^ on request/status teardown - # assert DebugStatus.repl is None # XXX no more bc bg thread cases? - assert DebugStatus.repl_task is None - - # sanity, for when hackin on all this? - if not isinstance(pause_err, trio.Cancelled): - req_ctx: Context = DebugStatus.req_ctx - # if req_ctx: - # # XXX, bc the child-task in root might cancel it? - # # assert req_ctx._scope.cancel_called - # assert req_ctx.maybe_error - - raise - - finally: - # set in finally block of func.. this can be synced-to - # eventually with a debug_nursery somehow? - # assert DebugStatus.req_task is None - - # always show frame when request fails due to internal - # failure in the above code (including an `BdbQuit`). - if ( - DebugStatus.req_err - or - repl_err - or - pause_err - ): - __tracebackhide__: bool = False - - -def _set_trace( - repl: PdbREPL, # passed by `_pause()` - hide_tb: bool, - - # partial-ed in by `.pause()` - api_frame: FrameType, - - # optionally passed in to provide support for - # `pause_from_sync()` where - actor: tractor.Actor|None = None, - task: Task|Thread|None = None, -): - __tracebackhide__: bool = hide_tb - actor: tractor.Actor = actor or current_actor() - task: Task|Thread = task or current_task() - - # else: - # TODO: maybe print the actor supervion tree up to the - # root here? Bo - log.pdb( - f'{_pause_msg}\n' - f'>(\n' - f'|_{actor.uid}\n' - f' |_{task}\n' # @ {actor.uid}\n' - # f'|_{task}\n' - # ^-TODO-^ more compact pformating? - # -[ ] make an `Actor.__repr()__` - # -[ ] should we use `log.pformat_task_uid()`? - ) - # presuming the caller passed in the "api frame" - # (the last frame before user code - like `.pause()`) - # then we only step up one frame to where the user - # called our API. - caller_frame: FrameType = api_frame.f_back # type: ignore - - # pretend this frame is the caller frame to show - # the entire call-stack all the way down to here. - if not hide_tb: - caller_frame: FrameType = inspect.currentframe() - - # engage ze REPL - # B~() - repl.set_trace(frame=caller_frame) - - -# XXX TODO! XXX, ensure `pytest -s` doesn't just -# hang on this being called in a test.. XD -# -[ ] maybe something in our test suite or is there -# some way we can detect output capture is enabled -# from the process itself? -# |_ronny: ? -# -async def pause( - *, - hide_tb: bool = True, - api_frame: FrameType|None = None, - - # TODO: figure out how to still make this work: - # -[ ] pass it direct to `_pause()`? 
- # -[ ] use it to set the `debug_nursery.cancel_scope.shield` - shield: bool = False, - **_pause_kwargs, - -) -> None: - ''' - A pause point (more commonly known as a "breakpoint") interrupt - instruction for engaging a blocking debugger instance to - conduct manual console-based-REPL-interaction from within - `tractor`'s async runtime, normally from some single-threaded - and currently executing actor-hosted-`trio`-task in some - (remote) process. - - NOTE: we use the semantics "pause" since it better encompasses - the entirety of the necessary global-runtime-state-mutation any - actor-task must access and lock in order to get full isolated - control over the process tree's root TTY: - https://en.wikipedia.org/wiki/Breakpoint - - ''' - __tracebackhide__: bool = hide_tb - - # always start 1 level up from THIS in user code since normally - # `tractor.pause()` is called explicitly by use-app code thus - # making it the highest up @api_frame. - api_frame: FrameType = api_frame or inspect.currentframe() - - # XXX TODO: this was causing cs-stack corruption in trio due to - # usage within the `Context._scope_nursery` (which won't work - # based on scoping of it versus call to `_maybe_enter_debugger()` - # from `._rpc._invoke()`) - # with trio.CancelScope( - # shield=shield, - # ) as cs: - # NOTE: so the caller can always manually cancel even - # if shielded! - # task_status.started(cs) - # log.critical( - # '`.pause() cancel-scope is:\n\n' - # f'{pformat_cs(cs, var_name="pause_cs")}\n\n' - # ) - await _pause( - debug_func=partial( - _set_trace, - api_frame=api_frame, - ), - shield=shield, - **_pause_kwargs - ) - # XXX avoid cs stack corruption when `PdbREPL.interaction()` - # raises `BdbQuit`. - # await DebugStatus.req_finished.wait() - - -_gb_mod: None|ModuleType|False = None - - -def maybe_import_greenback( - raise_not_found: bool = True, - force_reload: bool = False, - -) -> ModuleType|False: - # be cached-fast on module-already-inited - global _gb_mod - - if _gb_mod is False: - return False - - elif ( - _gb_mod is not None - and not force_reload - ): - return _gb_mod - - try: - import greenback - _gb_mod = greenback - return greenback - - except ModuleNotFoundError as mnf: - log.debug( - '`greenback` is not installed.\n' - 'No sync debug support!\n' - ) - _gb_mod = False - - if raise_not_found: - raise RuntimeError( - 'The `greenback` lib is required to use `tractor.pause_from_sync()`!\n' - 'https://github.com/oremanj/greenback\n' - ) from mnf - - return False - - -async def maybe_init_greenback(**kwargs) -> None|ModuleType: - try: - if mod := maybe_import_greenback(**kwargs): - await mod.ensure_portal() - log.devx( - '`greenback` portal opened!\n' - 'Sync debug support activated!\n' - ) - return mod - except BaseException: - log.exception('Failed to init `greenback`..') - raise - - return None - - -async def _pause_from_bg_root_thread( - behalf_of_thread: Thread, - repl: PdbREPL, - hide_tb: bool, - task_status: TaskStatus[Task] = trio.TASK_STATUS_IGNORED, - **_pause_kwargs, -): - ''' - Acquire the `Lock._debug_lock` from a bg (only need for - root-actor) non-`trio` thread (started via a call to - `.to_thread.run_sync()` in some actor) by scheduling this func in - the actor's service (TODO eventually a special debug_mode) - nursery. 
This task acquires the lock then `.started()`s the - `DebugStatus.repl_release: trio.Event` waits for the `PdbREPL` to - set it, then terminates very much the same way as - `request_root_stdio_lock()` uses an IPC `Context` from a subactor - to do the same from a remote process. - - This task is normally only required to be scheduled for the - special cases of a bg sync thread running in the root actor; see - the only usage inside `.pause_from_sync()`. - - ''' - global Lock - # TODO: unify this copied code with where it was - # from in `maybe_wait_for_debugger()` - # if ( - # Lock.req_handler_finished is not None - # and not Lock.req_handler_finished.is_set() - # and (in_debug := Lock.ctx_in_debug) - # ): - # log.devx( - # '\nRoot is waiting on tty lock to release from\n\n' - # # f'{caller_frame_info}\n' - # ) - # with trio.CancelScope(shield=True): - # await Lock.req_handler_finished.wait() - - # log.pdb( - # f'Subactor released debug lock\n' - # f'|_{in_debug}\n' - # ) - task: Task = current_task() - - # Manually acquire since otherwise on release we'll - # get a RTE raised by `trio` due to ownership.. - log.devx( - 'Trying to acquire `Lock` on behalf of bg thread\n' - f'|_{behalf_of_thread}\n' - ) - - # NOTE: this is already a task inside the main-`trio`-thread, so - # we don't need to worry about calling it another time from the - # bg thread on which who's behalf this task is operating. - DebugStatus.shield_sigint() - - out = await _pause( - debug_func=None, - repl=repl, - hide_tb=hide_tb, - called_from_sync=True, - called_from_bg_thread=True, - **_pause_kwargs - ) - DebugStatus.repl_task = behalf_of_thread - - lock: trio.FIFOLock = Lock._debug_lock - stats: trio.LockStatistics= lock.statistics() - assert stats.owner is task - assert Lock._owned_by_root - assert DebugStatus.repl_release - - # TODO: do we actually need this? - # originally i was trying to solve wy this was - # unblocking too soon in a thread but it was actually - # that we weren't setting our own `repl_release` below.. - while stats.owner is not task: - log.devx( - 'Trying to acquire `._debug_lock` from {stats.owner} for\n' - f'|_{behalf_of_thread}\n' - ) - await lock.acquire() - break - - # XXX NOTE XXX super important dawg.. - # set our own event since the current one might - # have already been overriden and then set when the - # last REPL mutex holder exits their sesh! - # => we do NOT want to override any existing one - # and we want to ensure we set our own ONLY AFTER we have - # acquired the `._debug_lock` - repl_release = DebugStatus.repl_release = trio.Event() - - # unblock caller thread delivering this bg task - log.devx( - 'Unblocking root-bg-thread since we acquired lock via `._pause()`\n' - f'|_{behalf_of_thread}\n' - ) - task_status.started(out) - - # wait for bg thread to exit REPL sesh. - try: - await repl_release.wait() - finally: - log.devx( - 'releasing lock from bg root thread task!\n' - f'|_ {behalf_of_thread}\n' - ) - Lock.release() - - -def pause_from_sync( - hide_tb: bool = True, - called_from_builtin: bool = False, - api_frame: FrameType|None = None, - - allow_no_runtime: bool = False, - - # proxy to `._pause()`, for ex: - # shield: bool = False, - # api_frame: FrameType|None = None, - **_pause_kwargs, - -) -> None: - ''' - Pause a `tractor` scheduled task or thread from sync (non-async - function) code. 
- - When `greenback` is installed we remap python's builtin - `breakpoint()` hook to this runtime-aware version which takes - care of all bg-thread detection and appropriate synchronization - with the root actor's `Lock` to avoid mult-thread/process REPL - clobbering Bo - - ''' - __tracebackhide__: bool = hide_tb - repl_owner: Task|Thread|None = None - try: - actor: tractor.Actor = current_actor( - err_on_no_runtime=False, - ) - if ( - not actor - and - not allow_no_runtime - ): - raise NoRuntime( - 'The actor runtime has not been opened?\n\n' - '`tractor.pause_from_sync()` is not functional without a wrapping\n' - '- `async with tractor.open_nursery()` or,\n' - '- `async with tractor.open_root_actor()`\n\n' - - 'If you are getting this from a builtin `breakpoint()` call\n' - 'it might mean the runtime was started then ' - 'stopped prematurely?\n' - ) - message: str = ( - f'{actor.uid} task called `tractor.pause_from_sync()`\n' - ) - - repl: PdbREPL = mk_pdb() - - # message += f'-> created local REPL {repl}\n' - is_trio_thread: bool = DebugStatus.is_main_trio_thread() - is_root: bool = is_root_process() - is_infected_aio: bool = actor.is_infected_aio() - thread: Thread = threading.current_thread() - - asyncio_task: asyncio.Task|None = None - if is_infected_aio: - asyncio_task = asyncio.current_task() - - # TODO: we could also check for a non-`.to_thread` context - # using `trio.from_thread.check_cancelled()` (says - # oremanj) wherein we get the following outputs: - # - # `RuntimeError`: non-`.to_thread` spawned thread - # noop: non-cancelled `.to_thread` - # `trio.Cancelled`: cancelled `.to_thread` - - # when called from a (bg) thread, run an async task in a new - # thread which will call `._pause()` manually with special - # handling for root-actor caller usage. - if ( - not is_trio_thread - and - not asyncio_task - ): - # TODO: `threading.Lock()` this so we don't get races in - # multi-thr cases where they're acquiring/releasing the - # REPL and setting request/`Lock` state, etc.. - repl_owner: Thread = thread - - # TODO: make root-actor bg thread usage work! - if is_root: - message += ( - f'-> called from a root-actor bg {thread}\n' - ) - - message += ( - '-> scheduling `._pause_from_bg_root_thread()`..\n' - ) - # XXX SUBTLE BADNESS XXX that should really change! - # don't over-write the `repl` here since when - # this behalf-of-bg_thread-task calls pause it will - # pass `debug_func=None` which will result in it - # returing a `repl==None` output and that get's also - # `.started(out)` back here! So instead just ignore - # that output and assign the `repl` created above! - bg_task, _ = trio.from_thread.run( - afn=partial( - actor._service_n.start, - partial( - _pause_from_bg_root_thread, - behalf_of_thread=thread, - repl=repl, - hide_tb=hide_tb, - **_pause_kwargs, - ), - ), - ) - DebugStatus.shield_sigint() - message += ( - f'-> `._pause_from_bg_root_thread()` started bg task {bg_task}\n' - ) - else: - message += f'-> called from a bg {thread}\n' - # NOTE: since this is a subactor, `._pause()` will - # internally issue a debug request via - # `request_root_stdio_lock()` and we don't need to - # worry about all the special considerations as with - # the root-actor per above. - bg_task, _ = trio.from_thread.run( - afn=partial( - _pause, - debug_func=None, - repl=repl, - hide_tb=hide_tb, - - # XXX to prevent `._pause()` for setting - # `DebugStatus.repl_task` to the gb task! - called_from_sync=True, - called_from_bg_thread=True, - - **_pause_kwargs - ), - ) - # ?TODO? 
XXX where do we NEED to call this in the - # subactor-bg-thread case? - DebugStatus.shield_sigint() - assert bg_task is not DebugStatus.repl_task - - # TODO: once supported, remove this AND the one - # inside `._pause()`! - # outstanding impl fixes: - # -[ ] need to make `.shield_sigint()` below work here! - # -[ ] how to handle `asyncio`'s new SIGINT-handler - # injection? - # -[ ] should `breakpoint()` work and what does it normally - # do in `asyncio` ctxs? - # if actor.is_infected_aio(): - # raise RuntimeError( - # '`tractor.pause[_from_sync]()` not yet supported ' - # 'for infected `asyncio` mode!' - # ) - elif ( - not is_trio_thread - and - is_infected_aio # as in, the special actor-runtime mode - # ^NOTE XXX, that doesn't mean the caller is necessarily - # an `asyncio.Task` just that `trio` has been embedded on - # the `asyncio` event loop! - and - asyncio_task # transitive caller is an actual `asyncio.Task` - ): - greenback: ModuleType = maybe_import_greenback() - - if greenback.has_portal(): - DebugStatus.shield_sigint() - fute: asyncio.Future = run_trio_task_in_future( - partial( - _pause, - debug_func=None, - repl=repl, - hide_tb=hide_tb, - - # XXX to prevent `._pause()` for setting - # `DebugStatus.repl_task` to the gb task! - called_from_sync=True, - called_from_bg_thread=True, - - **_pause_kwargs - ) - ) - repl_owner = asyncio_task - bg_task, _ = greenback.await_(fute) - # TODO: ASYNC version -> `.pause_from_aio()`? - # bg_task, _ = await fute - - # handle the case where an `asyncio` task has been - # spawned WITHOUT enabling a `greenback` portal.. - # => can often happen in 3rd party libs. - else: - bg_task = repl_owner - - # TODO, ostensibly we can just acquire the - # debug lock directly presuming we're the - # root actor running in infected asyncio - # mode? - # - # TODO, this would be a special case where - # a `_pause_from_root()` would come in very - # handy! - # if is_root: - # import pdbp; pdbp.set_trace() - # log.warning( - # 'Allowing `asyncio` task to acquire debug-lock in root-actor..\n' - # 'This is not fully implemented yet; there may be teardown hangs!\n\n' - # ) - # else: - - # simply unsupported, since there exists no hack (i - # can think of) to workaround this in a subactor - # which needs to lock the root's REPL ow we're sure - # to get prompt stdstreams clobbering.. - cf_repr: str = '' - if api_frame: - caller_frame: FrameType = api_frame.f_back - cf_repr: str = f'caller_frame: {caller_frame!r}\n' - - raise RuntimeError( - f"CAN'T USE `greenback._await()` without a portal !?\n\n" - f'Likely this task was NOT spawned via the `tractor.to_asyncio` API..\n' - f'{asyncio_task}\n' - f'{cf_repr}\n' - - f'Prolly the task was started out-of-band (from some lib?)\n' - f'AND one of the below was never called ??\n' - f'- greenback.ensure_portal()\n' - f'- greenback.bestow_portal()\n' - ) - - else: # we are presumably the `trio.run()` + main thread - # raises on not-found by default - greenback: ModuleType = maybe_import_greenback() - - # TODO: how to ensure this is either dynamically (if - # needed) called here (in some bg tn??) or that the - # subactor always already called it? - # greenback: ModuleType = await maybe_init_greenback() - - message += f'-> imported {greenback}\n' - - # NOTE XXX seems to need to be set BEFORE the `_pause()` - # invoke using gb below? 
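# NOTE (editorial sketch, not part of this patch; `sync_fn()` and
# `main()` are made up names): the `greenback.await_()` call below
# only works because a greenback "portal" was installed for this
# task beforehand (see `maybe_init_greenback()` above); in isolation
# the pattern is roughly,
#
# ```python
# import greenback
# import trio
#
# def sync_fn():
#     # run an awaitable from sync code via the task's portal
#     greenback.await_(trio.sleep(0))
#
# async def main():
#     await greenback.ensure_portal()
#     sync_fn()
#
# trio.run(main)
# ```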
- DebugStatus.shield_sigint() - repl_owner: Task = current_task() - - message += '-> calling `greenback.await_(_pause(debug_func=None))` from sync caller..\n' - try: - out = greenback.await_( - _pause( - debug_func=None, - repl=repl, - hide_tb=hide_tb, - called_from_sync=True, - **_pause_kwargs, - ) - ) - except RuntimeError as rte: - if not _state._runtime_vars.get( - 'use_greenback', - False, - ): - raise RuntimeError( - '`greenback` was never initialized in this actor!?\n\n' - f'{_state._runtime_vars}\n' - ) from rte - - raise - - if out: - bg_task, _ = out - else: - bg_task: Task = current_task() - - # assert repl is repl - # assert bg_task is repl_owner - if bg_task is not repl_owner: - raise DebugStateError( - f'The registered bg task for this debug request is NOT its owner ??\n' - f'bg_task: {bg_task}\n' - f'repl_owner: {repl_owner}\n\n' - - f'{DebugStatus.repr()}\n' - ) - - # NOTE: normally set inside `_enter_repl_sync()` - DebugStatus.repl_task: str = repl_owner - - # TODO: ensure we aggressively make the user aware about - # entering the global `breakpoint()` built-in from sync - # code? - message += ( - f'-> successfully scheduled `._pause()` in `trio` thread on behalf of {bg_task}\n' - f'-> Entering REPL via `tractor._set_trace()` from caller {repl_owner}\n' - ) - log.devx(message) - - # NOTE set as late as possible to avoid state clobbering - # in the multi-threaded case! - DebugStatus.repl = repl - - _set_trace( - api_frame=api_frame or inspect.currentframe(), - repl=repl, - hide_tb=hide_tb, - actor=actor, - task=repl_owner, - ) - # LEGACY NOTE on next LOC's frame showing weirdness.. - # - # XXX NOTE XXX no other LOC can be here without it - # showing up in the REPL's last stack frame !?! - # -[ ] tried to use `@pdbp.hideframe` decoration but - # still doesn't work - except BaseException as err: - log.exception( - 'Failed to sync-pause from\n\n' - f'{repl_owner}\n' - ) - __tracebackhide__: bool = False - raise err - - -def _sync_pause_from_builtin( - *args, - called_from_builtin=True, - **kwargs, -) -> None: - ''' - Proxy call `.pause_from_sync()` but indicate the caller is the - `breakpoint()` built-in. - - Note: this assigned to `os.environ['PYTHONBREAKPOINT']` inside `._root` - - ''' - pause_from_sync( - *args, - called_from_builtin=True, - api_frame=inspect.currentframe(), - **kwargs, - ) - - -# NOTE prefer a new "pause" semantic since it better describes -# "pausing the actor's runtime" for this particular -# paralell task to do debugging in a REPL. -async def breakpoint( - hide_tb: bool = True, - **kwargs, -): - log.warning( - '`tractor.breakpoint()` is deprecated!\n' - 'Please use `tractor.pause()` instead!\n' - ) - __tracebackhide__: bool = hide_tb - await pause( - api_frame=inspect.currentframe(), - **kwargs, - ) - - -_crash_msg: str = ( - 'Opening a pdb REPL in crashed actor' -) - - -def _post_mortem( - repl: PdbREPL, # normally passed by `_pause()` - - # XXX all `partial`-ed in by `post_mortem()` below! - tb: TracebackType, - api_frame: FrameType, - - shield: bool = False, - hide_tb: bool = False, - -) -> None: - ''' - Enter the ``pdbpp`` port mortem entrypoint using our custom - debugger instance. - - ''' - __tracebackhide__: bool = hide_tb - try: - actor: tractor.Actor = current_actor() - actor_repr: str = str(actor.uid) - # ^TODO, instead a nice runtime-info + maddr + uid? - # -[ ] impl a `Actor.__repr()__`?? 
- # |_ : @ - # no_runtime: bool = False - - except NoRuntime: - actor_repr: str = '' - # no_runtime: bool = True - - try: - task_repr: Task = current_task() - except RuntimeError: - task_repr: str = '' - - # TODO: print the actor supervion tree up to the root - # here! Bo - log.pdb( - f'{_crash_msg}\n' - f'x>(\n' - f' |_ {task_repr} @ {actor_repr}\n' - - ) - - # NOTE only replacing this from `pdbp.xpm()` to add the - # `end=''` to the print XD - print(traceback.format_exc(), end='') - - caller_frame: FrameType = api_frame.f_back - - # NOTE: see the impl details of followings to understand usage: - # - `pdbp.post_mortem()` - # - `pdbp.xps()` - # - `bdb.interaction()` - repl.reset() - repl.interaction( - frame=caller_frame, - # frame=None, - traceback=tb, - ) - # XXX NOTE XXX: absolutely required to avoid hangs! - # Since we presume the post-mortem was enaged to a task-ending - # error, we MUST release the local REPL request so that not other - # local task nor the root remains blocked! - # if not no_runtime: - # DebugStatus.release() - DebugStatus.release() - - -async def post_mortem( - *, - tb: TracebackType|None = None, - api_frame: FrameType|None = None, - hide_tb: bool = False, - - # TODO: support shield here just like in `pause()`? - # shield: bool = False, - - **_pause_kwargs, - -) -> None: - ''' - `tractor`'s builtin async equivalient of `pdb.post_mortem()` - which can be used inside exception handlers. - - It's also used for the crash handler when `debug_mode == True` ;) - - ''' - __tracebackhide__: bool = hide_tb - - tb: TracebackType = tb or sys.exc_info()[2] - - # TODO: do upward stack scan for highest @api_frame and - # use its parent frame as the expected user-app code - # interact point. - api_frame: FrameType = api_frame or inspect.currentframe() - - await _pause( - debug_func=partial( - _post_mortem, - api_frame=api_frame, - tb=tb, - ), - hide_tb=hide_tb, - **_pause_kwargs - ) - - -async def _maybe_enter_pm( - err: BaseException, - *, - tb: TracebackType|None = None, - api_frame: FrameType|None = None, - hide_tb: bool = False, - - # only enter debugger REPL when returns `True` - debug_filter: Callable[ - [BaseException|BaseExceptionGroup], - bool, - ] = lambda err: not is_multi_cancelled(err), - -): - if ( - debug_mode() - - # NOTE: don't enter debug mode recursively after quitting pdb - # Iow, don't re-enter the repl if the `quit` command was issued - # by the user. - and not isinstance(err, bdb.BdbQuit) - - # XXX: if the error is the likely result of runtime-wide - # cancellation, we don't want to enter the debugger since - # there's races between when the parent actor has killed all - # comms and when the child tries to contact said parent to - # acquire the tty lock. - - # Really we just want to mostly avoid catching KBIs here so there - # might be a simpler check we can do? - and - debug_filter(err) - ): - api_frame: FrameType = api_frame or inspect.currentframe() - tb: TracebackType = tb or sys.exc_info()[2] - await post_mortem( - api_frame=api_frame, - tb=tb, - ) - return True - - else: - return False - - -@acm -async def acquire_debug_lock( - subactor_uid: tuple[str, str], -) -> AsyncGenerator[ - trio.CancelScope|None, - tuple, -]: - ''' - Request to acquire the TTY `Lock` in the root actor, release on - exit. - - This helper is for actor's who don't actually need to acquired - the debugger but want to wait until the lock is free in the - process-tree root such that they don't clobber an ongoing pdb - REPL session in some peer or child! 
- - ''' - if not debug_mode(): - yield None - return - - task: Task = current_task() - async with trio.open_nursery() as n: - ctx: Context = await n.start( - partial( - request_root_stdio_lock, - actor_uid=subactor_uid, - task_uid=(task.name, id(task)), - ) - ) - yield ctx - ctx.cancel() - - -async def maybe_wait_for_debugger( - poll_steps: int = 2, - poll_delay: float = 0.1, - child_in_debug: bool = False, - - header_msg: str = '', - _ll: str = 'devx', - -) -> bool: # was locked and we polled? - - if ( - not debug_mode() - and - not child_in_debug - ): - return False - - logmeth: Callable = getattr(log, _ll) - - msg: str = header_msg - if ( - is_root_process() - ): - # If we error in the root but the debugger is - # engaged we don't want to prematurely kill (and - # thus clobber access to) the local tty since it - # will make the pdb repl unusable. - # Instead try to wait for pdb to be released before - # tearing down. - ctx_in_debug: Context|None = Lock.ctx_in_debug - in_debug: tuple[str, str]|None = ( - ctx_in_debug.chan.uid - if ctx_in_debug - else None - ) - if in_debug == current_actor().uid: - log.debug( - msg - + - 'Root already owns the TTY LOCK' - ) - return True - - elif in_debug: - msg += ( - f'Debug `Lock` in use by subactor\n|\n|_{in_debug}\n' - ) - # TODO: could this make things more deterministic? - # wait to see if a sub-actor task will be - # scheduled and grab the tty lock on the next - # tick? - # XXX => but it doesn't seem to work.. - # await trio.testing.wait_all_tasks_blocked(cushion=0) - else: - logmeth( - msg - + - 'Root immediately acquired debug TTY LOCK' - ) - return False - - for istep in range(poll_steps): - if ( - Lock.req_handler_finished is not None - and not Lock.req_handler_finished.is_set() - and in_debug is not None - ): - # caller_frame_info: str = pformat_caller_frame() - logmeth( - msg - + - '\n^^ Root is waiting on tty lock release.. ^^\n' - # f'{caller_frame_info}\n' - ) - - if not any_connected_locker_child(): - Lock.get_locking_task_cs().cancel() - - with trio.CancelScope(shield=True): - await Lock.req_handler_finished.wait() - - log.devx( - f'Subactor released debug lock\n' - f'|_{in_debug}\n' - ) - break - - # is no subactor locking debugger currently? - if ( - in_debug is None - and ( - Lock.req_handler_finished is None - or Lock.req_handler_finished.is_set() - ) - ): - logmeth( - msg - + - 'Root acquired tty lock!' - ) - break - - else: - logmeth( - 'Root polling for debug:\n' - f'poll step: {istep}\n' - f'poll delya: {poll_delay}\n\n' - f'{Lock.repr()}\n' - ) - with CancelScope(shield=True): - await trio.sleep(poll_delay) - continue - - return True - - # else: - # # TODO: non-root call for #320? - # this_uid: tuple[str, str] = current_actor().uid - # async with acquire_debug_lock( - # subactor_uid=this_uid, - # ): - # pass - return False - - -class BoxedMaybeException(Struct): - ''' - Box a maybe-exception for post-crash introspection usage - from the body of a `open_crash_handler()` scope. - - ''' - value: BaseException|None = None - - -# TODO: better naming and what additionals? -# - [ ] optional runtime plugging? -# - [ ] detection for sync vs. async code? -# - [ ] specialized REPL entry when in distributed mode? -# -[x] hide tb by def -# - [x] allow ignoring kbi Bo -@cm -def open_crash_handler( - catch: set[BaseException] = { - BaseException, - }, - ignore: set[BaseException] = { - KeyboardInterrupt, - trio.Cancelled, - }, - tb_hide: bool = True, -): - ''' - Generic "post mortem" crash handler using `pdbp` REPL debugger. 
- - We expose this as a CLI framework addon to both `click` and - `typer` users so they can quickly wrap cmd endpoints which get - automatically wrapped to use the runtime's `debug_mode: bool` - AND `pdbp.pm()` around any code that is PRE-runtime entry - - any sync code which runs BEFORE the main call to - `trio.run()`. - - ''' - __tracebackhide__: bool = tb_hide - - # TODO, yield a `outcome.Error`-like boxed type? - # -[~] use `outcome.Value/Error` X-> frozen! - # -[x] write our own..? - # -[ ] consider just wtv is used by `pytest.raises()`? - # - boxed_maybe_exc = BoxedMaybeException() - err: BaseException - try: - yield boxed_maybe_exc - except tuple(catch) as err: - boxed_maybe_exc.value = err - if ( - type(err) not in ignore - and - not is_multi_cancelled( - err, - ignore_nested=ignore - ) - ): - try: - # use our re-impl-ed version - _post_mortem( - repl=mk_pdb(), - tb=sys.exc_info()[2], - api_frame=inspect.currentframe().f_back, - ) - except bdb.BdbQuit: - __tracebackhide__: bool = False - raise err - - # XXX NOTE, `pdbp`'s version seems to lose the up-stack - # tb-info? - # pdbp.xpm() - - raise err - - -@cm -def maybe_open_crash_handler( - pdb: bool = False, - tb_hide: bool = True, - - **kwargs, -): - ''' - Same as `open_crash_handler()` but with bool input flag - to allow conditional handling. - - Normally this is used with CLI endpoints such that if the --pdb - flag is passed the pdb REPL is engaed on any crashes B) - ''' - __tracebackhide__: bool = tb_hide - - rtctx = nullcontext( - enter_result=BoxedMaybeException() - ) - if pdb: - rtctx = open_crash_handler(**kwargs) - - with rtctx as boxed_maybe_exc: - yield boxed_maybe_exc diff --git a/tractor/devx/_frame_stack.py b/tractor/devx/_frame_stack.py index 8e9bf46f..c99d3ecd 100644 --- a/tractor/devx/_frame_stack.py +++ b/tractor/devx/_frame_stack.py @@ -20,13 +20,18 @@ as it pertains to improving the grok-ability of our runtime! ''' from __future__ import annotations +from contextlib import ( + _GeneratorContextManager, + _AsyncGeneratorContextManager, +) from functools import partial import inspect +import textwrap from types import ( FrameType, FunctionType, MethodType, - # CodeType, + CodeType, ) from typing import ( Any, @@ -34,6 +39,9 @@ from typing import ( Type, ) +import pdbp +from tractor.log import get_logger +import trio from tractor.msg import ( pretty_struct, NamespacePath, @@ -41,6 +49,8 @@ from tractor.msg import ( import wrapt +log = get_logger(__name__) + # TODO: yeah, i don't love this and we should prolly just # write a decorator that actually keeps a stupid ref to the func # obj.. @@ -301,3 +311,70 @@ def api_frame( # error_set: set[BaseException], # ) -> TracebackType: # ... + + +def hide_runtime_frames() -> dict[FunctionType, CodeType]: + ''' + Hide call-stack frames for various std-lib and `trio`-API primitives + such that the tracebacks presented from our runtime are as minimized + as possible, particularly from inside a `PdbREPL`. + + ''' + # XXX HACKZONE XXX + # hide exit stack frames on nurseries and cancel-scopes! + # |_ so avoid seeing it when the `pdbp` REPL is first engaged from + # inside a `trio.open_nursery()` scope (with no line after it + # in before the block end??). + # + # TODO: FINALLY got this workin originally with + # `@pdbp.hideframe` around the `wrapper()` def embedded inside + # `_ki_protection_decoratior()`.. which is in the module: + # /home/goodboy/.virtualenvs/tractor311/lib/python3.11/site-packages/trio/_core/_ki.py + # + # -[ ] make an issue and patch for `trio` core? 
maybe linked + # to the long outstanding `pdb` one below? + # |_ it's funny that there's frame hiding throughout `._run.py` + # but not where it matters on the below exit funcs.. + # + # -[ ] provide a patchset for the lonstanding + # |_ https://github.com/python-trio/trio/issues/1155 + # + # -[ ] make a linked issue to ^ and propose allowing all the + # `._core._run` code to have their `__tracebackhide__` value + # configurable by a `RunVar` to allow getting scheduler frames + # if desired through configuration? + # + # -[ ] maybe dig into the core `pdb` issue why the extra frame is shown + # at all? + # + funcs: list[FunctionType] = [ + trio._core._run.NurseryManager.__aexit__, + trio._core._run.CancelScope.__exit__, + _GeneratorContextManager.__exit__, + _AsyncGeneratorContextManager.__aexit__, + _AsyncGeneratorContextManager.__aenter__, + trio.Event.wait, + ] + func_list_str: str = textwrap.indent( + "\n".join(f.__qualname__ for f in funcs), + prefix=' |_ ', + ) + log.devx( + 'Hiding the following runtime frames by default:\n' + f'{func_list_str}\n' + ) + + codes: dict[FunctionType, CodeType] = {} + for ref in funcs: + # stash a pre-modified version of each ref's code-obj + # so it can be reverted later if needed. + codes[ref] = ref.__code__ + pdbp.hideframe(ref) + # + # pdbp.hideframe(trio._core._run.NurseryManager.__aexit__) + # pdbp.hideframe(trio._core._run.CancelScope.__exit__) + # pdbp.hideframe(_GeneratorContextManager.__exit__) + # pdbp.hideframe(_AsyncGeneratorContextManager.__aexit__) + # pdbp.hideframe(_AsyncGeneratorContextManager.__aenter__) + # pdbp.hideframe(trio.Event.wait) + return codes diff --git a/tractor/devx/_stackscope.py b/tractor/devx/_stackscope.py index ccc46534..11d2a1ef 100644 --- a/tractor/devx/_stackscope.py +++ b/tractor/devx/_stackscope.py @@ -49,7 +49,7 @@ from tractor import ( _state, log as logmod, ) -from tractor.devx import _debug +from tractor.devx import debug log = logmod.get_logger(__name__) @@ -82,7 +82,7 @@ def dump_task_tree() -> None: if ( current_sigint_handler is not - _debug.DebugStatus._trio_handler + debug.DebugStatus._trio_handler ): sigint_handler_report: str = ( 'The default `trio` SIGINT handler was replaced?!' @@ -238,7 +238,8 @@ def enable_stack_on_sig( import stackscope except ImportError: log.warning( - '`stackscope` not installed for use in debug mode!' + 'The `stackscope` lib is not installed!\n' + '`Ignoring enable_stack_on_sig() call!\n' ) return None @@ -255,8 +256,8 @@ def enable_stack_on_sig( dump_tree_on_sig, ) log.devx( - 'Enabling trace-trees on `SIGUSR1` ' - 'since `stackscope` is installed @ \n' + f'Enabling trace-trees on `SIGUSR1` ' + f'since `stackscope` is installed @ \n' f'{stackscope!r}\n\n' f'With `SIGUSR1` handler\n' f'|_{dump_tree_on_sig}\n' diff --git a/tractor/devx/debug/__init__.py b/tractor/devx/debug/__init__.py new file mode 100644 index 00000000..faf9f2f7 --- /dev/null +++ b/tractor/devx/debug/__init__.py @@ -0,0 +1,100 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +Multi-actor debugging for da peeps! + +''' +from __future__ import annotations +from tractor.log import get_logger +from ._repl import ( + PdbREPL as PdbREPL, + mk_pdb as mk_pdb, + TractorConfig as TractorConfig, +) +from ._tty_lock import ( + DebugStatus as DebugStatus, + DebugStateError as DebugStateError, +) +from ._trace import ( + Lock as Lock, + _pause_msg as _pause_msg, + _repl_fail_msg as _repl_fail_msg, + _set_trace as _set_trace, + _sync_pause_from_builtin as _sync_pause_from_builtin, + breakpoint as breakpoint, + maybe_init_greenback as maybe_init_greenback, + maybe_import_greenback as maybe_import_greenback, + pause as pause, + pause_from_sync as pause_from_sync, +) +from ._post_mortem import ( + BoxedMaybeException as BoxedMaybeException, + maybe_open_crash_handler as maybe_open_crash_handler, + open_crash_handler as open_crash_handler, + post_mortem as post_mortem, + _crash_msg as _crash_msg, + _maybe_enter_pm as _maybe_enter_pm, +) +from ._sync import ( + maybe_wait_for_debugger as maybe_wait_for_debugger, + acquire_debug_lock as acquire_debug_lock, +) +from ._sigint import ( + sigint_shield as sigint_shield, + _ctlc_ignore_header as _ctlc_ignore_header +) + +log = get_logger(__name__) + +# ---------------- +# XXX PKG TODO XXX +# ---------------- +# refine the internal impl and APIs! +# +# -[ ] rework `._pause()` and it's branch-cases for root vs. +# subactor: +# -[ ] `._pause_from_root()` + `_pause_from_subactor()`? +# -[ ] do the de-factor based on bg-thread usage in +# `.pause_from_sync()` & `_pause_from_bg_root_thread()`. +# -[ ] drop `debug_func == None` case which is confusing af.. +# -[ ] factor out `_enter_repl_sync()` into a util func for calling +# the `_set_trace()` / `_post_mortem()` APIs? +# +# -[ ] figure out if we need `acquire_debug_lock()` and/or re-implement +# it as part of the `.pause_from_sync()` rework per above? +# +# -[ ] pair the `._pause_from_subactor()` impl with a "debug nursery" +# that's dynamically allocated inside the `._rpc` task thus +# avoiding the `._service_n.start()` usage for the IPC request? +# -[ ] see the TODO inside `._rpc._errors_relayed_via_ipc()` +# +# -[ ] impl a `open_debug_request()` which encaps all +# `request_root_stdio_lock()` task scheduling deats +# + `DebugStatus` state mgmt; which should prolly be re-branded as +# a `DebugRequest` type anyway AND with suppoort for bg-thread +# (from root actor) usage? +# +# -[ ] handle the `xonsh` case for bg-root-threads in the SIGINT +# handler! +# -[ ] do we need to do the same for subactors? +# -[ ] make the failing tests finally pass XD +# +# -[ ] simplify `maybe_wait_for_debugger()` to be a root-task only +# API? +# -[ ] currently it's implemented as that so might as well make it +# formal? diff --git a/tractor/devx/debug/_post_mortem.py b/tractor/devx/debug/_post_mortem.py new file mode 100644 index 00000000..32d10074 --- /dev/null +++ b/tractor/devx/debug/_post_mortem.py @@ -0,0 +1,412 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. 
+ +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +Post-mortem debugging APIs and surrounding machinery for both +sync and async contexts. + +Generally we maintain the same semantics a `pdb.post.mortem()` but +with actor-tree-wide sync/cooperation around any (sub)actor's use of +the root's TTY. + +''' +from __future__ import annotations +import bdb +from contextlib import ( + AbstractContextManager, + contextmanager as cm, + nullcontext, +) +from functools import ( + partial, +) +import inspect +import sys +import traceback +from typing import ( + Callable, + Sequence, + Type, + TYPE_CHECKING, +) +from types import ( + TracebackType, + FrameType, +) + +from msgspec import Struct +import trio +from tractor._exceptions import ( + NoRuntime, +) +from tractor import _state +from tractor._state import ( + current_actor, + debug_mode, +) +from tractor.log import get_logger +from tractor.trionics import ( + is_multi_cancelled, +) +from ._trace import ( + _pause, +) +from ._tty_lock import ( + DebugStatus, +) +from ._repl import ( + PdbREPL, + mk_pdb, + TractorConfig as TractorConfig, +) + +if TYPE_CHECKING: + from trio.lowlevel import Task + from tractor._runtime import ( + Actor, + ) + +_crash_msg: str = ( + 'Opening a pdb REPL in crashed actor' +) + +log = get_logger(__package__) + + +class BoxedMaybeException(Struct): + ''' + Box a maybe-exception for post-crash introspection usage + from the body of a `open_crash_handler()` scope. + + ''' + value: BaseException|None = None + + # handler can suppress crashes dynamically + raise_on_exit: bool|Sequence[Type[BaseException]] = True + + def pformat(self) -> str: + ''' + Repr the boxed `.value` error in more-than-string + repr form. + + ''' + if not self.value: + return f'<{type(self).__name__}( .value=None )>' + + return ( + f'<{type(self.value).__name__}(\n' + f' |_.value = {self.value}\n' + f')>\n' + ) + + __repr__ = pformat + + +def _post_mortem( + repl: PdbREPL, # normally passed by `_pause()` + + # XXX all `partial`-ed in by `post_mortem()` below! + tb: TracebackType, + api_frame: FrameType, + + shield: bool = False, + hide_tb: bool = True, + + # maybe pre/post REPL entry + repl_fixture: ( + AbstractContextManager[bool] + |None + ) = None, + + boxed_maybe_exc: BoxedMaybeException|None = None, + +) -> None: + ''' + Enter the ``pdbpp`` port mortem entrypoint using our custom + debugger instance. + + ''' + __tracebackhide__: bool = hide_tb + + # maybe enter any user fixture + enter_repl: bool = DebugStatus.maybe_enter_repl_fixture( + repl=repl, + repl_fixture=repl_fixture, + boxed_maybe_exc=boxed_maybe_exc, + ) + try: + if not enter_repl: + # XXX, trigger `.release()` below immediately! + return + try: + actor: Actor = current_actor() + actor_repr: str = str(actor.uid) + # ^TODO, instead a nice runtime-info + maddr + uid? + # -[ ] impl a `Actor.__repr()__`?? + # |_ : @ + + except NoRuntime: + actor_repr: str = '' + + try: + task_repr: Task = trio.lowlevel.current_task() + except RuntimeError: + task_repr: str = '' + + # TODO: print the actor supervion tree up to the root + # here! Bo + log.pdb( + f'{_crash_msg}\n' + f'x>(\n' + f' |_ {task_repr} @ {actor_repr}\n' + + ) + + # XXX NOTE(s) on `pdbp.xpm()` version.. 
+ # + # - seems to lose the up-stack tb-info? + # - currently we're (only) replacing this from `pdbp.xpm()` + # to add the `end=''` to the print XD + # + print(traceback.format_exc(), end='') + caller_frame: FrameType = api_frame.f_back + + # NOTE, see the impl details of these in the lib to + # understand usage: + # - `pdbp.post_mortem()` + # - `pdbp.xps()` + # - `bdb.interaction()` + repl.reset() + repl.interaction( + frame=caller_frame, + # frame=None, + traceback=tb, + ) + finally: + # XXX NOTE XXX: this is abs required to avoid hangs! + # + # Since we presume the post-mortem was enaged to + # a task-ending error, we MUST release the local REPL request + # so that not other local task nor the root remains blocked! + DebugStatus.release() + + +async def post_mortem( + *, + tb: TracebackType|None = None, + api_frame: FrameType|None = None, + hide_tb: bool = False, + + # TODO: support shield here just like in `pause()`? + # shield: bool = False, + + **_pause_kwargs, + +) -> None: + ''' + Our builtin async equivalient of `pdb.post_mortem()` which can be + used inside exception handlers. + + It's also used for the crash handler when `debug_mode == True` ;) + + ''' + __tracebackhide__: bool = hide_tb + + tb: TracebackType = tb or sys.exc_info()[2] + + # TODO: do upward stack scan for highest @api_frame and + # use its parent frame as the expected user-app code + # interact point. + api_frame: FrameType = api_frame or inspect.currentframe() + + # TODO, move to submod `._pausing` or ._api? _trace + await _pause( + debug_func=partial( + _post_mortem, + api_frame=api_frame, + tb=tb, + ), + hide_tb=hide_tb, + **_pause_kwargs + ) + + +async def _maybe_enter_pm( + err: BaseException, + *, + tb: TracebackType|None = None, + api_frame: FrameType|None = None, + hide_tb: bool = True, + + # only enter debugger REPL when returns `True` + debug_filter: Callable[ + [BaseException|BaseExceptionGroup], + bool, + ] = lambda err: not is_multi_cancelled(err), + **_pause_kws, +): + if ( + debug_mode() + + # NOTE: don't enter debug mode recursively after quitting pdb + # Iow, don't re-enter the repl if the `quit` command was issued + # by the user. + and not isinstance(err, bdb.BdbQuit) + + # XXX: if the error is the likely result of runtime-wide + # cancellation, we don't want to enter the debugger since + # there's races between when the parent actor has killed all + # comms and when the child tries to contact said parent to + # acquire the tty lock. + + # Really we just want to mostly avoid catching KBIs here so there + # might be a simpler check we can do? + and + debug_filter(err) + ): + api_frame: FrameType = api_frame or inspect.currentframe() + tb: TracebackType = tb or sys.exc_info()[2] + await post_mortem( + api_frame=api_frame, + tb=tb, + **_pause_kws, + ) + return True + + else: + return False + + +# TODO: better naming and what additionals? +# - [ ] optional runtime plugging? +# - [ ] detection for sync vs. async code? +# - [ ] specialized REPL entry when in distributed mode? +# -[x] hide tb by def +# - [x] allow ignoring kbi Bo +@cm +def open_crash_handler( + catch: set[BaseException] = { + BaseException, + }, + ignore: set[BaseException] = { + KeyboardInterrupt, + trio.Cancelled, + }, + hide_tb: bool = True, + + repl_fixture: ( + AbstractContextManager[bool] # pre/post REPL entry + |None + ) = None, + raise_on_exit: bool|Sequence[Type[BaseException]] = True, +): + ''' + Generic "post mortem" crash handler using `pdbp` REPL debugger. 
+ + We expose this as a CLI framework addon to both `click` and + `typer` users so they can quickly wrap cmd endpoints which get + automatically wrapped to use the runtime's `debug_mode: bool` + AND `pdbp.pm()` around any code that is PRE-runtime entry + - any sync code which runs BEFORE the main call to + `trio.run()`. + + ''' + __tracebackhide__: bool = hide_tb + + # TODO, yield a `outcome.Error`-like boxed type? + # -[~] use `outcome.Value/Error` X-> frozen! + # -[x] write our own..? + # -[ ] consider just wtv is used by `pytest.raises()`? + # + boxed_maybe_exc = BoxedMaybeException( + raise_on_exit=raise_on_exit, + ) + err: BaseException + try: + yield boxed_maybe_exc + except tuple(catch) as err: + boxed_maybe_exc.value = err + if ( + type(err) not in ignore + and + not is_multi_cancelled( + err, + ignore_nested=ignore + ) + ): + try: + # use our re-impl-ed version of `pdbp.xpm()` + _post_mortem( + repl=mk_pdb(), + tb=sys.exc_info()[2], + api_frame=inspect.currentframe().f_back, + hide_tb=hide_tb, + + repl_fixture=repl_fixture, + boxed_maybe_exc=boxed_maybe_exc, + ) + except bdb.BdbQuit: + __tracebackhide__: bool = False + raise err + + if ( + raise_on_exit is True + or ( + raise_on_exit is not False + and ( + set(raise_on_exit) + and + type(err) in raise_on_exit + ) + ) + and + boxed_maybe_exc.raise_on_exit == raise_on_exit + ): + raise err + + +@cm +def maybe_open_crash_handler( + pdb: bool|None = None, + hide_tb: bool = True, + + **kwargs, +): + ''' + Same as `open_crash_handler()` but with bool input flag + to allow conditional handling. + + Normally this is used with CLI endpoints such that if the --pdb + flag is passed the pdb REPL is engaed on any crashes B) + + ''' + __tracebackhide__: bool = hide_tb + + if pdb is None: + pdb: bool = _state.is_debug_mode() + + rtctx = nullcontext( + enter_result=BoxedMaybeException() + ) + if pdb: + rtctx = open_crash_handler( + hide_tb=hide_tb, + **kwargs, + ) + + with rtctx as boxed_maybe_exc: + yield boxed_maybe_exc diff --git a/tractor/devx/debug/_repl.py b/tractor/devx/debug/_repl.py new file mode 100644 index 00000000..1c0f03cc --- /dev/null +++ b/tractor/devx/debug/_repl.py @@ -0,0 +1,207 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +`pdpp.Pdb` extentions/customization and other delegate usage. + +''' +from functools import ( + cached_property, +) +import os + +import pdbp +from tractor._state import ( + is_root_process, +) + +from ._tty_lock import ( + Lock, + DebugStatus, +) + + +class TractorConfig(pdbp.DefaultConfig): + ''' + Custom `pdbp` config which tries to use the best tradeoff + between pretty and minimal. + + ''' + use_pygments: bool = True + sticky_by_default: bool = False + enable_hidden_frames: bool = True + + # much thanks @mdmintz for the hot tip! 
+ # fixes line spacing issue when resizing terminal B) + truncate_long_lines: bool = False + + # ------ - ------ + # our own custom config vars mostly + # for syncing with the actor tree's singleton + # TTY `Lock`. + + +class PdbREPL(pdbp.Pdb): + ''' + Add teardown hooks and local state describing any + ongoing TTY `Lock` request dialog. + + ''' + # override the pdbp config with our coolio one + # NOTE: this is only loaded when no `~/.pdbrc` exists + # so we should prolly pass it into the .__init__() instead? + # i dunno, see the `DefaultFactory` and `pdb.Pdb` impls. + DefaultConfig = TractorConfig + + status = DebugStatus + + # NOTE: see details in stdlib's `bdb.py` + # def user_exception(self, frame, exc_info): + # ''' + # Called when we stop on an exception. + # ''' + # log.warning( + # 'Exception during REPL sesh\n\n' + # f'{frame}\n\n' + # f'{exc_info}\n\n' + # ) + + # NOTE: this actually hooks but i don't see anyway to detect + # if an error was caught.. this is why currently we just always + # call `DebugStatus.release` inside `_post_mortem()`. + # def preloop(self): + # print('IN PRELOOP') + # super().preloop() + + # TODO: cleaner re-wrapping of all this? + # -[ ] figure out how to disallow recursive .set_trace() entry + # since that'll cause deadlock for us. + # -[ ] maybe a `@cm` to call `super().()`? + # -[ ] look at hooking into the `pp` hook specially with our + # own set of pretty-printers? + # * `.pretty_struct.Struct.pformat()` + # * `.pformat(MsgType.pld)` + # * `.pformat(Error.tb_str)`? + # * .. maybe more? + # + def set_continue(self): + try: + super().set_continue() + finally: + # NOTE: for subactors the stdio lock is released via the + # allocated RPC locker task, so for root we have to do it + # manually. + if ( + is_root_process() + and + Lock._debug_lock.locked() + and + DebugStatus.is_main_trio_thread() + ): + # Lock.release(raise_on_thread=False) + Lock.release() + + # XXX AFTER `Lock.release()` for root local repl usage + DebugStatus.release() + + def set_quit(self): + try: + super().set_quit() + finally: + if ( + is_root_process() + and + Lock._debug_lock.locked() + and + DebugStatus.is_main_trio_thread() + ): + # Lock.release(raise_on_thread=False) + Lock.release() + + # XXX after `Lock.release()` for root local repl usage + DebugStatus.release() + + # XXX NOTE: we only override this because apparently the stdlib pdb + # bois likes to touch the SIGINT handler as much as i like to touch + # my d$%&. + def _cmdloop(self): + self.cmdloop() + + @cached_property + def shname(self) -> str | None: + ''' + Attempt to return the login shell name with a special check for + the infamous `xonsh` since it seems to have some issues much + different from std shells when it comes to flushing the prompt? + + ''' + # SUPER HACKY and only really works if `xonsh` is not used + # before spawning further sub-shells.. + shpath = os.getenv('SHELL', None) + + if shpath: + if ( + os.getenv('XONSH_LOGIN', default=False) + or 'xonsh' in shpath + ): + return 'xonsh' + + return os.path.basename(shpath) + + return None + + +def mk_pdb() -> PdbREPL: + ''' + Deliver a new `PdbREPL`: a multi-process safe `pdbp.Pdb`-variant + using the magic of `tractor`'s SC-safe IPC. + + B) + + Our `pdb.Pdb` subtype accomplishes multi-process safe debugging + by: + + - mutexing access to the root process' std-streams (& thus parent + process TTY) via an IPC managed `Lock` singleton per + actor-process tree. 
+ + - temporarily overriding any subactor's SIGINT handler to shield + during live REPL sessions in sub-actors such that cancellation + is never (mistakenly) triggered by a ctrl-c and instead only by + explicit runtime API requests or after the + `pdb.Pdb.interaction()` call has returned. + + FURTHER, the `pdbp.Pdb` instance is configured to be `trio` + "compatible" from a SIGINT handling perspective; we mask out + the default `pdb` handler and instead apply `trio`s default + which mostly addresses all issues described in: + + - https://github.com/python-trio/trio/issues/1155 + + The instance returned from this factory should always be + preferred over the default `pdb[p].set_trace()` whenever using + a `pdb` REPL inside a `trio` based runtime. + + ''' + pdb = PdbREPL() + + # XXX: These are the important flags mentioned in + # https://github.com/python-trio/trio/issues/1155 + # which resolve the traceback spews to console. + pdb.allow_kbdint = True + pdb.nosigint = True + return pdb diff --git a/tractor/devx/debug/_sigint.py b/tractor/devx/debug/_sigint.py new file mode 100644 index 00000000..80f79e58 --- /dev/null +++ b/tractor/devx/debug/_sigint.py @@ -0,0 +1,333 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +A custom SIGINT handler which mainly shields actor (task) +cancellation during REPL interaction. + +''' +from __future__ import annotations +from typing import ( + TYPE_CHECKING, +) +import trio +from tractor.log import get_logger +from tractor._state import ( + current_actor, + is_root_process, +) +from ._repl import ( + PdbREPL, +) +from ._tty_lock import ( + any_connected_locker_child, + DebugStatus, + Lock, +) + +if TYPE_CHECKING: + from tractor.ipc import ( + Channel, + ) + from tractor._runtime import ( + Actor, + ) + +log = get_logger(__name__) + +_ctlc_ignore_header: str = ( + 'Ignoring SIGINT while debug REPL in use' +) + + +def sigint_shield( + signum: int, + frame: 'frame', # type: ignore # noqa + *args, + +) -> None: + ''' + Specialized, debugger-aware SIGINT handler. + + In childred we always ignore/shield for SIGINT to avoid + deadlocks since cancellation should always be managed by the + supervising parent actor. The root actor-proces is always + cancelled on ctrl-c. + + ''' + __tracebackhide__: bool = True + actor: Actor = current_actor() + + def do_cancel(): + # If we haven't tried to cancel the runtime then do that instead + # of raising a KBI (which may non-gracefully destroy + # a ``trio.run()``). + if not actor._cancel_called: + actor.cancel_soon() + + # If the runtime is already cancelled it likely means the user + # hit ctrl-c again because teardown didn't fully take place in + # which case we do the "hard" raising of a local KBI. 
+ else: + raise KeyboardInterrupt + + # only set in the actor actually running the REPL + repl: PdbREPL|None = DebugStatus.repl + + # TODO: maybe we should flatten out all these cases using + # a match/case? + # + # root actor branch that reports whether or not a child + # has locked debugger. + if is_root_process(): + # log.warning( + log.devx( + 'Handling SIGINT in root actor\n' + f'{Lock.repr()}' + f'{DebugStatus.repr()}\n' + ) + # try to see if the supposed (sub)actor in debug still + # has an active connection to *this* actor, and if not + # it's likely they aren't using the TTY lock / debugger + # and we should propagate SIGINT normally. + any_connected: bool = any_connected_locker_child() + + problem = ( + f'root {actor.uid} handling SIGINT\n' + f'any_connected: {any_connected}\n\n' + + f'{Lock.repr()}\n' + ) + + if ( + (ctx := Lock.ctx_in_debug) + and + (uid_in_debug := ctx.chan.uid) # "someone" is (ostensibly) using debug `Lock` + ): + name_in_debug: str = uid_in_debug[0] + assert not repl + # if not repl: # but it's NOT us, the root actor. + # sanity: since no repl ref is set, we def shouldn't + # be the lock owner! + assert name_in_debug != 'root' + + # IDEAL CASE: child has REPL as expected + if any_connected: # there are subactors we can contact + # XXX: only if there is an existing connection to the + # (sub-)actor in debug do we ignore SIGINT in this + # parent! Otherwise we may hang waiting for an actor + # which has already terminated to unlock. + # + # NOTE: don't emit this with `.pdb()` level in + # root without a higher level. + log.runtime( + _ctlc_ignore_header + + + f' by child ' + f'{uid_in_debug}\n' + ) + problem = None + + else: + problem += ( + '\n' + f'A `pdb` REPL is SUPPOSEDLY in use by child {uid_in_debug}\n' + f'BUT, no child actors are IPC contactable!?!?\n' + ) + + # IDEAL CASE: root has REPL as expected + else: + # root actor still has this SIGINT handler active without + # an actor using the `Lock` (a bug state) ?? + # => so immediately cancel any stale lock cs and revert + # the handler! + if not DebugStatus.repl: + # TODO: WHEN should we revert back to ``trio`` + # handler if this one is stale? + # -[ ] maybe after a counts work of ctl-c mashes? + # -[ ] use a state var like `stale_handler: bool`? + problem += ( + 'No subactor is using a `pdb` REPL according `Lock.ctx_in_debug`?\n' + 'BUT, the root should be using it, WHY this handler ??\n\n' + 'So either..\n' + '- some root-thread is using it but has no `.repl` set?, OR\n' + '- something else weird is going on outside the runtime!?\n' + ) + else: + # NOTE: since we emit this msg on ctl-c, we should + # also always re-print the prompt the tail block! + log.pdb( + _ctlc_ignore_header + + + f' by root actor..\n' + f'{DebugStatus.repl_task}\n' + f' |_{repl}\n' + ) + problem = None + + # XXX if one is set it means we ARE NOT operating an ideal + # case where a child subactor or us (the root) has the + # lock without any other detected problems. + if problem: + + # detect, report and maybe clear a stale lock request + # cancel scope. + lock_cs: trio.CancelScope = Lock.get_locking_task_cs() + maybe_stale_lock_cs: bool = ( + lock_cs is not None + and not lock_cs.cancel_called + ) + if maybe_stale_lock_cs: + problem += ( + '\n' + 'Stale `Lock.ctx_in_debug._scope: CancelScope` detected?\n' + f'{Lock.ctx_in_debug}\n\n' + + '-> Calling ctx._scope.cancel()!\n' + ) + lock_cs.cancel() + + # TODO: wen do we actually want/need this, see above. 
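
The TODO near the top of this branch asks whether the root-actor cases could be flattened with a `match`/`case`; a rough, purely illustrative sketch of that dispatch over three booleans (the parameter names are hypothetical, not actual runtime state):

    def classify_root_sigint(
        child_has_lock: bool,      # i.e. `Lock.ctx_in_debug` is set
        child_connected: bool,     # i.e. `any_connected_locker_child()`
        root_has_repl: bool,       # i.e. `DebugStatus.repl` is set locally
    ) -> str:
        match (child_has_lock, child_connected, root_has_repl):
            case (True, True, _):
                return 'ignore SIGINT: child REPL active and IPC-reachable'
            case (True, False, _):
                return 'problem: child claims the lock but is unreachable'
            case (False, _, True):
                return 'ignore SIGINT: root-local REPL active'
            case (False, _, False):
                return 'problem: SIGINT shielded but no REPL in use anywhere'
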
+ # DebugStatus.unshield_sigint() + log.warning(problem) + + # child actor that has locked the debugger + elif not is_root_process(): + log.debug( + f'Subactor {actor.uid} handling SIGINT\n\n' + f'{Lock.repr()}\n' + ) + + rent_chan: Channel = actor._parent_chan + if ( + rent_chan is None + or + not rent_chan.connected() + ): + log.warning( + 'This sub-actor thinks it is debugging ' + 'but it has no connection to its parent ??\n' + f'{actor.uid}\n' + 'Allowing SIGINT propagation..' + ) + DebugStatus.unshield_sigint() + + repl_task: str|None = DebugStatus.repl_task + req_task: str|None = DebugStatus.req_task + if ( + repl_task + and + repl + ): + log.pdb( + _ctlc_ignore_header + + + f' by local task\n\n' + f'{repl_task}\n' + f' |_{repl}\n' + ) + elif req_task: + log.debug( + _ctlc_ignore_header + + + f' by local request-task and either,\n' + f'- someone else is already REPL-in and has the `Lock`, or\n' + f'- some other local task already is replin?\n\n' + f'{req_task}\n' + ) + + # TODO can we remove this now? + # -[ ] does this path ever get hit any more? + else: + msg: str = ( + 'SIGINT shield handler still active BUT, \n\n' + ) + if repl_task is None: + msg += ( + '- No local task claims to be in debug?\n' + ) + + if repl is None: + msg += ( + '- No local REPL is currently active?\n' + ) + + if req_task is None: + msg += ( + '- No debug request task is active?\n' + ) + + log.warning( + msg + + + 'Reverting handler to `trio` default!\n' + ) + DebugStatus.unshield_sigint() + + # XXX ensure that the reverted-to-handler actually is + # able to rx what should have been **this** KBI ;) + do_cancel() + + # TODO: how to handle the case of an intermediary-child actor + # that **is not** marked in debug mode? See oustanding issue: + # https://github.com/goodboy/tractor/issues/320 + # elif debug_mode(): + + # maybe redraw/print last REPL output to console since + # we want to alert the user that more input is expect since + # nothing has been done dur to ignoring sigint. + if ( + DebugStatus.repl # only when current actor has a REPL engaged + ): + flush_status: str = ( + 'Flushing stdout to ensure new prompt line!\n' + ) + + # XXX: yah, mega hack, but how else do we catch this madness XD + if ( + repl.shname == 'xonsh' + ): + flush_status += ( + '-> ALSO re-flushing due to `xonsh`..\n' + ) + repl.stdout.write(repl.prompt) + + # log.warning( + log.devx( + flush_status + ) + repl.stdout.flush() + + # TODO: better console UX to match the current "mode": + # -[ ] for example if in sticky mode where if there is output + # detected as written to the tty we redraw this part underneath + # and erase the past draw of this same bit above? + # repl.sticky = True + # repl._print_if_sticky() + + # also see these links for an approach from `ptk`: + # https://github.com/goodboy/tractor/issues/130#issuecomment-663752040 + # https://github.com/prompt-toolkit/python-prompt-toolkit/blob/c2c6af8a0308f9e5d7c0e28cb8a02963fe0ce07a/prompt_toolkit/patch_stdout.py + else: + log.devx( + # log.warning( + 'Not flushing stdout since not needed?\n' + f'|_{repl}\n' + ) + + # XXX only for tracing this handler + log.devx('exiting SIGINT') diff --git a/tractor/devx/debug/_sync.py b/tractor/devx/debug/_sync.py new file mode 100644 index 00000000..cf4bb334 --- /dev/null +++ b/tractor/devx/debug/_sync.py @@ -0,0 +1,220 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. 
+ +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +Debugger synchronization APIs to ensure orderly access and +non-TTY-clobbering graceful teardown. + + +''' +from __future__ import annotations +from contextlib import ( + asynccontextmanager as acm, +) +from functools import ( + partial, +) +from typing import ( + AsyncGenerator, + Callable, +) + +from tractor.log import get_logger +import trio +from trio.lowlevel import ( + current_task, + Task, +) +from tractor._context import Context +from tractor._state import ( + current_actor, + debug_mode, + is_root_process, +) +from ._repl import ( + TractorConfig as TractorConfig, +) +from ._tty_lock import ( + Lock, + request_root_stdio_lock, + any_connected_locker_child, +) +from ._sigint import ( + sigint_shield as sigint_shield, + _ctlc_ignore_header as _ctlc_ignore_header +) + +log = get_logger(__package__) + + +async def maybe_wait_for_debugger( + poll_steps: int = 2, + poll_delay: float = 0.1, + child_in_debug: bool = False, + + header_msg: str = '', + _ll: str = 'devx', + +) -> bool: # was locked and we polled? + + if ( + not debug_mode() + and + not child_in_debug + ): + return False + + logmeth: Callable = getattr(log, _ll) + + msg: str = header_msg + if ( + is_root_process() + ): + # If we error in the root but the debugger is + # engaged we don't want to prematurely kill (and + # thus clobber access to) the local tty since it + # will make the pdb repl unusable. + # Instead try to wait for pdb to be released before + # tearing down. + ctx_in_debug: Context|None = Lock.ctx_in_debug + in_debug: tuple[str, str]|None = ( + ctx_in_debug.chan.uid + if ctx_in_debug + else None + ) + if in_debug == current_actor().uid: + log.debug( + msg + + + 'Root already owns the TTY LOCK' + ) + return True + + elif in_debug: + msg += ( + f'Debug `Lock` in use by subactor\n|\n|_{in_debug}\n' + ) + # TODO: could this make things more deterministic? + # wait to see if a sub-actor task will be + # scheduled and grab the tty lock on the next + # tick? + # XXX => but it doesn't seem to work.. + # await trio.testing.wait_all_tasks_blocked(cushion=0) + else: + logmeth( + msg + + + 'Root immediately acquired debug TTY LOCK' + ) + return False + + for istep in range(poll_steps): + if ( + Lock.req_handler_finished is not None + and not Lock.req_handler_finished.is_set() + and in_debug is not None + ): + # caller_frame_info: str = pformat_caller_frame() + logmeth( + msg + + + '\n^^ Root is waiting on tty lock release.. ^^\n' + # f'{caller_frame_info}\n' + ) + + if not any_connected_locker_child(): + Lock.get_locking_task_cs().cancel() + + with trio.CancelScope(shield=True): + await Lock.req_handler_finished.wait() + + log.devx( + f'Subactor released debug lock\n' + f'|_{in_debug}\n' + ) + break + + # is no subactor locking debugger currently? 
+ if ( + in_debug is None + and ( + Lock.req_handler_finished is None + or Lock.req_handler_finished.is_set() + ) + ): + logmeth( + msg + + + 'Root acquired tty lock!' + ) + break + + else: + logmeth( + 'Root polling for debug:\n' + f'poll step: {istep}\n' + f'poll delya: {poll_delay}\n\n' + f'{Lock.repr()}\n' + ) + with trio.CancelScope(shield=True): + await trio.sleep(poll_delay) + continue + + return True + + # else: + # # TODO: non-root call for #320? + # this_uid: tuple[str, str] = current_actor().uid + # async with acquire_debug_lock( + # subactor_uid=this_uid, + # ): + # pass + return False + + +@acm +async def acquire_debug_lock( + subactor_uid: tuple[str, str], +) -> AsyncGenerator[ + trio.CancelScope|None, + tuple, +]: + ''' + Request to acquire the TTY `Lock` in the root actor, release on + exit. + + This helper is for actor's who don't actually need to acquired + the debugger but want to wait until the lock is free in the + process-tree root such that they don't clobber an ongoing pdb + REPL session in some peer or child! + + ''' + if not debug_mode(): + yield None + return + + task: Task = current_task() + async with trio.open_nursery() as n: + ctx: Context = await n.start( + partial( + request_root_stdio_lock, + actor_uid=subactor_uid, + task_uid=(task.name, id(task)), + ) + ) + yield ctx + ctx.cancel() diff --git a/tractor/devx/debug/_trace.py b/tractor/devx/debug/_trace.py new file mode 100644 index 00000000..a23d2e23 --- /dev/null +++ b/tractor/devx/debug/_trace.py @@ -0,0 +1,1259 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +Debugger/tracing public API. + +Essentially providing the same +`pdb(p).set_trace()`/`breakpoint()`-style REPL UX but with seemless +mult-process support within a single actor tree. 
+ +''' +from __future__ import annotations +import asyncio +import bdb +from contextlib import ( + AbstractContextManager, +) +from functools import ( + partial, +) +import inspect +import threading +from typing import ( + Callable, + TYPE_CHECKING, +) +from types import ( + FrameType, + ModuleType, +) + +import trio +from trio.lowlevel import ( + current_task, + Task, +) +from trio import ( + TaskStatus, +) +import tractor +from tractor.log import get_logger +from tractor.to_asyncio import run_trio_task_in_future +from tractor._context import Context +from tractor import _state +from tractor._exceptions import ( + NoRuntime, + InternalError, +) +from tractor._state import ( + current_actor, + current_ipc_ctx, + is_root_process, +) +from ._repl import ( + PdbREPL, + mk_pdb, + TractorConfig as TractorConfig, +) +from ._tty_lock import ( + DebugStatus, + DebugStateError, + Lock, + request_root_stdio_lock, +) +from ._sigint import ( + sigint_shield as sigint_shield, + _ctlc_ignore_header as _ctlc_ignore_header +) +from ..pformat import ( + ppfmt, +) + +if TYPE_CHECKING: + from trio.lowlevel import Task + from threading import Thread + from tractor._runtime import ( + Actor, + ) + # from ._post_mortem import BoxedMaybeException + from ._repl import PdbREPL + +log = get_logger(__package__) + +_pause_msg: str = 'Opening a pdb REPL in paused actor' +_repl_fail_msg: str|None = ( + 'Failed to REPl via `_pause()` ' +) + +async def _pause( + + debug_func: Callable|partial|None, + + # NOTE: must be passed in the `.pause_from_sync()` case! + repl: PdbREPL|None = None, + + # TODO: allow caller to pause despite task cancellation, + # exactly the same as wrapping with: + # with CancelScope(shield=True): + # await pause() + # => the REMAINING ISSUE is that the scope's .__exit__() frame + # is always show in the debugger on entry.. and there seems to + # be no way to override it?.. + # + shield: bool = False, + hide_tb: bool = True, + called_from_sync: bool = False, + called_from_bg_thread: bool = False, + task_status: TaskStatus[ + tuple[Task, PdbREPL], + trio.Event + ] = trio.TASK_STATUS_IGNORED, + + # maybe pre/post REPL entry + repl_fixture: ( + AbstractContextManager[bool] + |None + ) = None, + + **debug_func_kwargs, + +) -> tuple[Task, PdbREPL]|None: + ''' + Inner impl for `pause()` to avoid the `trio.CancelScope.__exit__()` + stack frame when not shielded (since apparently i can't figure out + how to hide it using the normal mechanisms..) + + Hopefully we won't need this in the long run. + + ''' + __tracebackhide__: bool = hide_tb + pause_err: BaseException|None = None + actor: Actor = current_actor() + try: + task: Task = current_task() + except RuntimeError as rte: + # NOTE, 2 cases we might get here: + # + # - ACTUALLY not a `trio.lowlevel.Task` nor runtime caller, + # |_ error out as normal + # + # - an infected `asycio` actor calls it from an actual + # `asyncio.Task` + # |_ in this case we DO NOT want to RTE! + __tracebackhide__: bool = False + if actor.is_infected_aio(): + log.exception( + 'Failed to get current `trio`-task?' + ) + raise RuntimeError( + 'An `asyncio` task should not be calling this!?' + ) from rte + else: + task = asyncio.current_task() + + if debug_func is not None: + debug_func = partial(debug_func) + + # XXX NOTE XXX set it here to avoid ctl-c from cancelling a debug + # request from a subactor BEFORE the REPL is entered by that + # process. 
+ if ( + not repl + and + debug_func + ): + repl: PdbREPL = mk_pdb() + DebugStatus.shield_sigint() + + # TODO: move this into a `open_debug_request()` @acm? + # -[ ] prolly makes the most sense to do the request + # task spawn as part of an `@acm` api which delivers the + # `DebugRequest` instance and ensures encapsing all the + # pld-spec and debug-nursery? + # -[ ] maybe make this a `PdbREPL` method or mod func? + # -[ ] factor out better, main reason for it is common logic for + # both root and sub repl entry + def _enter_repl_sync( + debug_func: partial[None], + ) -> None: + __tracebackhide__: bool = hide_tb + + # maybe enter any user fixture + enter_repl: bool = DebugStatus.maybe_enter_repl_fixture( + repl=repl, + repl_fixture=repl_fixture, + ) + if not enter_repl: + return + + debug_func_name: str = ( + debug_func.func.__name__ if debug_func else 'None' + ) + + # TODO: do we want to support using this **just** for the + # locking / common code (prolly to help address #320)? + task_status.started((task, repl)) + try: + if debug_func: + # block here one (at the appropriate frame *up*) where + # ``breakpoint()`` was awaited and begin handling stdio. + log.devx( + 'Entering sync world of the `pdb` REPL for task..\n' + f'{repl}\n' + f' |_{task}\n' + ) + + # set local task on process-global state to avoid + # recurrent entries/requests from the same + # actor-local task. + DebugStatus.repl_task = task + if repl: + DebugStatus.repl = repl + else: + log.error( + 'No REPl instance set before entering `debug_func`?\n' + f'{debug_func}\n' + ) + + # invoke the low-level REPL activation routine which itself + # should call into a `Pdb.set_trace()` of some sort. + debug_func( + repl=repl, + hide_tb=hide_tb, + **debug_func_kwargs, + ) + + # TODO: maybe invert this logic and instead + # do `assert debug_func is None` when + # `called_from_sync`? + else: + if ( + called_from_sync + and + not DebugStatus.is_main_trio_thread() + ): + assert called_from_bg_thread + assert DebugStatus.repl_task is not task + + return (task, repl) + + except trio.Cancelled: + log.exception( + 'Cancelled during invoke of internal\n\n' + f'`debug_func = {debug_func_name}`\n' + ) + # XXX NOTE: DON'T release lock yet + raise + + except BaseException: + __tracebackhide__: bool = False + log.exception( + 'Failed to invoke internal\n\n' + f'`debug_func = {debug_func_name}`\n' + ) + # NOTE: OW this is ONLY called from the + # `.set_continue/next` hooks! + DebugStatus.release(cancel_req_task=True) + + raise + + log.debug( + 'Entering `._pause()` for requesting task\n' + f'|_{task}\n' + ) + + # TODO: this should be created as part of `DebugRequest()` init + # which should instead be a one-shot-use singleton much like + # the `PdbREPL`. + repl_task: Thread|Task|None = DebugStatus.repl_task + if ( + not DebugStatus.repl_release + or + DebugStatus.repl_release.is_set() + ): + log.debug( + 'Setting new `DebugStatus.repl_release: trio.Event` for requesting task\n' + f'|_{task}\n' + ) + DebugStatus.repl_release = trio.Event() + else: + log.devx( + 'Already an existing actor-local REPL user task\n' + f'|_{repl_task}\n' + ) + + # ^-NOTE-^ this must be created BEFORE scheduling any subactor + # debug-req task since it needs to wait on it just after + # `.started()`-ing back its wrapping `.req_cs: CancelScope`. 
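
The `repl_release` handling above exists because a `trio.Event` can never be un-set; each new REPL session has to mint a fresh one. A tiny standalone demo of that pattern:

    import trio

    async def main():
        release = trio.Event()
        release.set()                 # pretend a prior REPL session already ended

        # can't "reset" an Event, so allocate a new one per session
        if release.is_set():
            release = trio.Event()

        async def repl_session():
            await trio.sleep(0.1)
            release.set()             # user typed 'continue'/'quit'

        async with trio.open_nursery() as tn:
            tn.start_soon(repl_session)
            await release.wait()      # blocks until this session releases
            print('REPL released')

    trio.run(main)
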
+ + repl_err: BaseException|None = None + try: + if is_root_process(): + # we also wait in the root-parent for any child that + # may have the tty locked prior + # TODO: wait, what about multiple root tasks (with bg + # threads) acquiring it though? + ctx: Context|None = Lock.ctx_in_debug + repl_task: Task|None = DebugStatus.repl_task + if ( + ctx is None + and + repl_task is task + # and + # DebugStatus.repl + # ^-NOTE-^ matches for multi-threaded case as well? + ): + # re-entrant root process already has it: noop. + log.warning( + f'This root actor task is already within an active REPL session\n' + f'Ignoring this recurrent`tractor.pause()` entry\n\n' + f'|_{task}\n' + # TODO: use `._frame_stack` scanner to find the @api_frame + ) + with trio.CancelScope(shield=shield): + await trio.lowlevel.checkpoint() + return (repl, task) + + # elif repl_task: + # log.warning( + # f'This root actor has another task already in REPL\n' + # f'Waitin for the other task to complete..\n\n' + # f'|_{task}\n' + # # TODO: use `._frame_stack` scanner to find the @api_frame + # ) + # with trio.CancelScope(shield=shield): + # await DebugStatus.repl_release.wait() + # await trio.sleep(0.1) + + # must shield here to avoid hitting a `Cancelled` and + # a child getting stuck bc we clobbered the tty + with trio.CancelScope(shield=shield): + ctx_line = '`Lock` in this root actor task' + acq_prefix: str = 'shield-' if shield else '' + if ( + Lock._debug_lock.locked() + ): + if ctx: + ctx_line: str = ( + 'active `Lock` owned by ctx\n\n' + f'{ctx}' + ) + elif Lock._owned_by_root: + ctx_line: str = ( + 'Already owned by root-task `Lock`\n\n' + f'repl_task: {DebugStatus.repl_task}\n' + f'repl: {DebugStatus.repl}\n' + ) + else: + ctx_line: str = ( + '**STALE `Lock`** held by unknown root/remote task ' + 'with no request ctx !?!?' + ) + + log.debug( + f'attempting to {acq_prefix}acquire ' + f'{ctx_line}' + ) + await Lock._debug_lock.acquire() + Lock._owned_by_root = True + # else: + + # if ( + # not called_from_bg_thread + # and not called_from_sync + # ): + # log.devx( + # f'attempting to {acq_prefix}acquire ' + # f'{ctx_line}' + # ) + + # XXX: since we need to enter pdb synchronously below, + # and we don't want to block the thread that starts + # stepping through the application thread, we later + # must `Lock._debug_lock.release()` manually from + # some `PdbREPL` completion callback(`.set_[continue/exit]()`). + # + # So, when `._pause()` is called from a (bg/non-trio) + # thread, special provisions are needed and we need + # to do the `.acquire()`/`.release()` calls from + # a common `trio.task` (due to internal impl of + # `FIFOLock`). Thus we do not acquire here and + # instead expect `.pause_from_sync()` to take care of + # this detail depending on the caller's (threading) + # usage. + # + # NOTE that this special case is ONLY required when + # using `.pause_from_sync()` from the root actor + # since OW a subactor will instead make an IPC + # request (in the branch below) to acquire the + # `Lock`-mutex and a common root-actor RPC task will + # take care of `._debug_lock` mgmt! + + # enter REPL from root, no TTY locking IPC ctx necessary + # since we can acquire the `Lock._debug_lock` directly in + # thread. 
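
The shielded `CancelScope` wrapped around the lock acquisition above is the standard `trio` way to keep a critical section alive while the surrounding task is being cancelled; a small self-contained demo:

    import trio

    async def main():
        with trio.move_on_after(0.1):          # an external cancellation source
            with trio.CancelScope(shield=True):
                # shielded: the timeout above cannot interrupt this sleep
                await trio.sleep(0.3)
                print('critical section completed despite the timeout')

    trio.run(main)
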
+ return _enter_repl_sync(debug_func) + + # TODO: need a more robust check for the "root" actor + elif ( + not is_root_process() + and actor._parent_chan # a connected child + ): + repl_task: Task|None = DebugStatus.repl_task + req_task: Task|None = DebugStatus.req_task + if req_task: + log.warning( + f'Already an ongoing repl request?\n' + f'|_{req_task}\n\n' + + f'REPL task is\n' + f'|_{repl_task}\n\n' + + ) + # Recurrent entry case. + # this task already has the lock and is likely + # recurrently entering a `.pause()`-point either bc, + # - someone is hacking on runtime internals and put + # one inside code that get's called on the way to + # this code, + # - a legit app task uses the 'next' command while in + # a REPL sesh, and actually enters another + # `.pause()` (in a loop or something). + # + # XXX Any other cose is likely a bug. + if ( + repl_task + ): + if repl_task is task: + log.warning( + f'{task.name}@{actor.uid} already has TTY lock\n' + f'ignoring..' + ) + with trio.CancelScope(shield=shield): + await trio.lowlevel.checkpoint() + return + + else: + # if **this** actor is already in debug REPL we want + # to maintain actor-local-task mutex access, so block + # here waiting for the control to be released - this + # -> allows for recursive entries to `tractor.pause()` + log.warning( + f'{task}@{actor.uid} already has TTY lock\n' + f'waiting for release..' + ) + with trio.CancelScope(shield=shield): + await DebugStatus.repl_release.wait() + await trio.sleep(0.1) + + elif ( + req_task + ): + log.warning( + 'Local task already has active debug request\n' + f'|_{task}\n\n' + + 'Waiting for previous request to complete..\n' + ) + with trio.CancelScope(shield=shield): + await DebugStatus.req_finished.wait() + + # this **must** be awaited by the caller and is done using the + # root nursery so that the debugger can continue to run without + # being restricted by the scope of a new task nursery. + + # TODO: if we want to debug a trio.Cancelled triggered exception + # we have to figure out how to avoid having the service nursery + # cancel on this task start? I *think* this works below: + # ```python + # actor._service_tn.cancel_scope.shield = shield + # ``` + # but not entirely sure if that's a sane way to implement it? + + # NOTE currently we spawn the lock request task inside this + # subactor's global `Actor._service_tn` so that the + # lifetime of the lock-request can outlive the current + # `._pause()` scope while the user steps through their + # application code and when they finally exit the + # session, via 'continue' or 'quit' cmds, the `PdbREPL` + # will manually call `DebugStatus.release()` to release + # the lock session with the root actor. + # + # TODO: ideally we can add a tighter scope for this + # request task likely by conditionally opening a "debug + # nursery" inside `_errors_relayed_via_ipc()`, see the + # todo in tht module, but + # -[ ] it needs to be outside the normal crash handling + # `_maybe_enter_debugger()` block-call. + # -[ ] we probably only need to allocate the nursery when + # we detect the runtime is already in debug mode. 
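
The request task spawned just below via `actor._service_tn.start()` relies on `trio`'s `nursery.start()` handshake: the child runs until it calls `task_status.started()`, hands a value back to the spawner, then keeps running inside the nursery. A minimal standalone sketch of that shape (names are illustrative):

    import trio

    async def request_lock(
        task_status: trio.TaskStatus = trio.TASK_STATUS_IGNORED,
    ):
        # pretend to open the IPC ctx with the root actor..
        await trio.sleep(0)
        task_status.started('lock-request-handle')
        await trio.sleep_forever()     # live on until the nursery tears us down

    async def main():
        async with trio.open_nursery() as tn:
            handle = await tn.start(request_lock)   # blocks until .started()
            print('got', handle)
            tn.cancel_scope.cancel()                # done, cancel the request task

    trio.run(main)
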
+ # + curr_ctx: Context = current_ipc_ctx() + # req_ctx: Context = await curr_ctx._debug_tn.start( + log.devx( + 'Starting request task\n' + f'|_{task}\n' + ) + with trio.CancelScope(shield=shield): + req_ctx: Context = await actor._service_tn.start( + partial( + request_root_stdio_lock, + actor_uid=actor.uid, + task_uid=(task.name, id(task)), # task uuid (effectively) + shield=shield, + ) + ) + # XXX sanity, our locker task should be the one which + # entered a new IPC ctx with the root actor, NOT the one + # that exists around the task calling into `._pause()`. + assert ( + req_ctx + is + DebugStatus.req_ctx + is not + curr_ctx + ) + + # enter REPL + return _enter_repl_sync(debug_func) + + # TODO: prolly factor this plus the similar block from + # `_enter_repl_sync()` into a common @cm? + except BaseException as _pause_err: + pause_err: BaseException = _pause_err + _repl_fail_report: str|None = _repl_fail_msg + if isinstance(pause_err, bdb.BdbQuit): + log.devx( + 'REPL for pdb was explicitly quit!\n' + ) + _repl_fail_report = None + + # when the actor is mid-runtime cancellation the + # `Actor._service_tn` might get closed before we can spawn + # the request task, so just ignore expected RTE. + elif ( + isinstance(pause_err, RuntimeError) + and + actor._cancel_called + ): + # service nursery won't be usable and we + # don't want to lock up the root either way since + # we're in (the midst of) cancellation. + log.warning( + 'Service nursery likely closed due to actor-runtime cancellation..\n' + 'Ignoring failed debugger lock request task spawn..\n' + ) + return + + elif isinstance(pause_err, trio.Cancelled): + _repl_fail_report += ( + 'You called `tractor.pause()` from an already cancelled scope!\n\n' + 'Consider `await tractor.pause(shield=True)` to make it work B)\n' + ) + + else: + _repl_fail_report += f'on behalf of {repl_task} ??\n' + + if _repl_fail_report: + log.exception(_repl_fail_report) + + if not actor.is_infected_aio(): + DebugStatus.release(cancel_req_task=True) + + # sanity checks for ^ on request/status teardown + # assert DebugStatus.repl is None # XXX no more bc bg thread cases? + assert DebugStatus.repl_task is None + + # sanity, for when hackin on all this? + if not isinstance(pause_err, trio.Cancelled): + req_ctx: Context = DebugStatus.req_ctx + # if req_ctx: + # # XXX, bc the child-task in root might cancel it? + # # assert req_ctx._scope.cancel_called + # assert req_ctx.maybe_error + + raise + + finally: + # set in finally block of func.. this can be synced-to + # eventually with a debug_nursery somehow? + # assert DebugStatus.req_task is None + + # always show frame when request fails due to internal + # failure in the above code (including an `BdbQuit`). + if ( + DebugStatus.req_err + or + repl_err + or + pause_err + ): + __tracebackhide__: bool = False + + +def _set_trace( + repl: PdbREPL, # passed by `_pause()` + hide_tb: bool, + + # partial-ed in by `.pause()` + api_frame: FrameType, + + # optionally passed in to provide support for + # `pause_from_sync()` where + actor: tractor.Actor|None = None, + task: Task|Thread|None = None, +): + __tracebackhide__: bool = hide_tb + actor: tractor.Actor = actor or current_actor() + task: Task|Thread = task or current_task() + + # else: + # TODO: maybe print the actor supervion tree up to the + # root here? Bo + log.pdb( + f'{_pause_msg}\n' + f'>(\n' + f'|_{actor.uid}\n' + f' |_{task}\n' # @ {actor.uid}\n' + # f'|_{task}\n' + # ^-TODO-^ more compact pformating? 
+ # -[ ] make an `Actor.__repr()__` + # -[ ] should we use `log.pformat_task_uid()`? + ) + # presuming the caller passed in the "api frame" + # (the last frame before user code - like `.pause()`) + # then we only step up one frame to where the user + # called our API. + caller_frame: FrameType = api_frame.f_back # type: ignore + + # pretend this frame is the caller frame to show + # the entire call-stack all the way down to here. + if not hide_tb: + caller_frame: FrameType = inspect.currentframe() + + # engage ze REPL + # B~() + repl.set_trace(frame=caller_frame) + + +# XXX TODO! XXX, ensure `pytest -s` doesn't just +# hang on this being called in a test.. XD +# -[ ] maybe something in our test suite or is there +# some way we can detect output capture is enabled +# from the process itself? +# |_ronny: ? +# +async def pause( + *, + hide_tb: bool = True, + api_frame: FrameType|None = None, + + # TODO: figure out how to still make this work: + # -[ ] pass it direct to `_pause()`? + # -[ ] use it to set the `debug_nursery.cancel_scope.shield` + shield: bool = False, + **_pause_kwargs, + +) -> None: + ''' + A pause point (more commonly known as a "breakpoint") interrupt + instruction for engaging a blocking debugger instance to + conduct manual console-based-REPL-interaction from within + `tractor`'s async runtime, normally from some single-threaded + and currently executing actor-hosted-`trio`-task in some + (remote) process. + + NOTE: we use the semantics "pause" since it better encompasses + the entirety of the necessary global-runtime-state-mutation any + actor-task must access and lock in order to get full isolated + control over the process tree's root TTY: + https://en.wikipedia.org/wiki/Breakpoint + + ''' + __tracebackhide__: bool = hide_tb + + # always start 1 level up from THIS in user code since normally + # `tractor.pause()` is called explicitly by use-app code thus + # making it the highest up @api_frame. + api_frame: FrameType = api_frame or inspect.currentframe() + + # XXX TODO: this was causing cs-stack corruption in trio due to + # usage within the `Context._scope_nursery` (which won't work + # based on scoping of it versus call to `_maybe_enter_debugger()` + # from `._rpc._invoke()`) + # with trio.CancelScope( + # shield=shield, + # ) as cs: + # NOTE: so the caller can always manually cancel even + # if shielded! + # task_status.started(cs) + # log.critical( + # '`.pause() cancel-scope is:\n\n' + # f'{pformat_cs(cs, var_name="pause_cs")}\n\n' + # ) + await _pause( + debug_func=partial( + _set_trace, + api_frame=api_frame, + ), + shield=shield, + **_pause_kwargs + ) + # XXX avoid cs stack corruption when `PdbREPL.interaction()` + # raises `BdbQuit`. 
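
`pause()` captures `inspect.currentframe()` at the API boundary precisely so `_set_trace()` can step one frame up (`.f_back`) and land the REPL on the user's call site rather than on tractor internals. A tiny illustration of that frame arithmetic:

    import inspect
    from types import FrameType

    def api() -> None:
        api_frame: FrameType = inspect.currentframe()
        caller: FrameType = api_frame.f_back   # the user's call site, one hop up
        print(f'would pause at {caller.f_code.co_filename}:{caller.f_lineno}')

    def user_code() -> None:
        api()

    user_code()
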
+ # await DebugStatus.req_finished.wait() + + +_gb_mod: None|ModuleType|False = None + + +def maybe_import_greenback( + raise_not_found: bool = True, + force_reload: bool = False, + +) -> ModuleType|False: + # be cached-fast on module-already-inited + global _gb_mod + + if _gb_mod is False: + return False + + elif ( + _gb_mod is not None + and not force_reload + ): + return _gb_mod + + try: + import greenback + _gb_mod = greenback + return greenback + + except ModuleNotFoundError as mnf: + log.debug( + '`greenback` is not installed.\n' + 'No sync debug support!\n' + ) + _gb_mod = False + + if raise_not_found: + raise RuntimeError( + 'The `greenback` lib is required to use `tractor.pause_from_sync()`!\n' + 'https://github.com/oremanj/greenback\n' + ) from mnf + + return False + + +async def maybe_init_greenback(**kwargs) -> None|ModuleType: + try: + if mod := maybe_import_greenback(**kwargs): + await mod.ensure_portal() + log.devx( + '`greenback` portal opened!\n' + 'Sync debug support activated!\n' + ) + return mod + except BaseException: + log.exception('Failed to init `greenback`..') + raise + + return None + + +async def _pause_from_bg_root_thread( + behalf_of_thread: Thread, + repl: PdbREPL, + hide_tb: bool, + task_status: TaskStatus[Task] = trio.TASK_STATUS_IGNORED, + **_pause_kwargs, +): + ''' + Acquire the `Lock._debug_lock` from a bg (only need for + root-actor) non-`trio` thread (started via a call to + `.to_thread.run_sync()` in some actor) by scheduling this func in + the actor's service (TODO eventually a special debug_mode) + nursery. This task acquires the lock then `.started()`s the + `DebugStatus.repl_release: trio.Event` waits for the `PdbREPL` to + set it, then terminates very much the same way as + `request_root_stdio_lock()` uses an IPC `Context` from a subactor + to do the same from a remote process. + + This task is normally only required to be scheduled for the + special cases of a bg sync thread running in the root actor; see + the only usage inside `.pause_from_sync()`. + + ''' + global Lock + # TODO: unify this copied code with where it was + # from in `maybe_wait_for_debugger()` + # if ( + # Lock.req_handler_finished is not None + # and not Lock.req_handler_finished.is_set() + # and (in_debug := Lock.ctx_in_debug) + # ): + # log.devx( + # '\nRoot is waiting on tty lock to release from\n\n' + # # f'{caller_frame_info}\n' + # ) + # with trio.CancelScope(shield=True): + # await Lock.req_handler_finished.wait() + + # log.pdb( + # f'Subactor released debug lock\n' + # f'|_{in_debug}\n' + # ) + task: Task = current_task() + + # Manually acquire since otherwise on release we'll + # get a RTE raised by `trio` due to ownership.. + log.devx( + 'Trying to acquire `Lock` on behalf of bg thread\n' + f'|_{behalf_of_thread}\n' + ) + + # NOTE: this is already a task inside the main-`trio`-thread, so + # we don't need to worry about calling it another time from the + # bg thread on which who's behalf this task is operating. + DebugStatus.shield_sigint() + + out = await _pause( + debug_func=None, + repl=repl, + hide_tb=hide_tb, + called_from_sync=True, + called_from_bg_thread=True, + **_pause_kwargs + ) + DebugStatus.repl_task = behalf_of_thread + + lock: trio.FIFOLock = Lock._debug_lock + stats: trio.LockStatistics= lock.statistics() + assert stats.owner is task + assert Lock._owned_by_root + assert DebugStatus.repl_release + + # TODO: do we actually need this? 
+ # originally i was trying to solve wy this was + # unblocking too soon in a thread but it was actually + # that we weren't setting our own `repl_release` below.. + while stats.owner is not task: + log.devx( + 'Trying to acquire `._debug_lock` from {stats.owner} for\n' + f'|_{behalf_of_thread}\n' + ) + await lock.acquire() + break + + # XXX NOTE XXX super important dawg.. + # set our own event since the current one might + # have already been overriden and then set when the + # last REPL mutex holder exits their sesh! + # => we do NOT want to override any existing one + # and we want to ensure we set our own ONLY AFTER we have + # acquired the `._debug_lock` + repl_release = DebugStatus.repl_release = trio.Event() + + # unblock caller thread delivering this bg task + log.devx( + 'Unblocking root-bg-thread since we acquired lock via `._pause()`\n' + f'|_{behalf_of_thread}\n' + ) + task_status.started(out) + + # wait for bg thread to exit REPL sesh. + try: + await repl_release.wait() + finally: + log.devx( + 'releasing lock from bg root thread task!\n' + f'|_ {behalf_of_thread}\n' + ) + Lock.release() + + +def pause_from_sync( + hide_tb: bool = True, + called_from_builtin: bool = False, + api_frame: FrameType|None = None, + + allow_no_runtime: bool = False, + + # proxy to `._pause()`, for ex: + # shield: bool = False, + # api_frame: FrameType|None = None, + **_pause_kwargs, + +) -> None: + ''' + Pause a `tractor` scheduled task or thread from sync (non-async + function) code. + + When `greenback` is installed we remap python's builtin + `breakpoint()` hook to this runtime-aware version which takes + care of all bg-thread detection and appropriate synchronization + with the root actor's `Lock` to avoid mult-thread/process REPL + clobbering Bo + + ''' + __tracebackhide__: bool = hide_tb + repl_owner: Task|Thread|None = None + try: + actor: tractor.Actor = current_actor( + err_on_no_runtime=False, + ) + if ( + not actor + and + not allow_no_runtime + ): + raise NoRuntime( + 'The actor runtime has not been opened?\n\n' + '`tractor.pause_from_sync()` is not functional without a wrapping\n' + '- `async with tractor.open_nursery()` or,\n' + '- `async with tractor.open_root_actor()`\n\n' + + 'If you are getting this from a builtin `breakpoint()` call\n' + 'it might mean the runtime was started then ' + 'stopped prematurely?\n' + ) + message: str = ( + f'{actor.uid} task called `tractor.pause_from_sync()`\n' + ) + + repl: PdbREPL = mk_pdb() + + # message += f'-> created local REPL {repl}\n' + is_trio_thread: bool = DebugStatus.is_main_trio_thread() + is_root: bool = is_root_process() + is_infected_aio: bool = actor.is_infected_aio() + thread: Thread = threading.current_thread() + + asyncio_task: asyncio.Task|None = None + if is_infected_aio: + asyncio_task = asyncio.current_task() + + # TODO: we could also check for a non-`.to_thread` context + # using `trio.from_thread.check_cancelled()` (says + # oremanj) wherein we get the following outputs: + # + # `RuntimeError`: non-`.to_thread` spawned thread + # noop: non-cancelled `.to_thread` + # `trio.Cancelled`: cancelled `.to_thread` + + # CASE: bg-thread spawned via `trio.to_thread` + # ----- + # when called from a (bg) thread, run an async task in a new + # thread which will call `._pause()` manually with special + # handling for root-actor caller usage. 
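
The bg-thread branches below all funnel through `trio.from_thread.run()`, which hops from a `trio.to_thread.run_sync()` worker back onto the `trio` thread to run an async function; a minimal standalone example of that round trip:

    import threading
    import trio

    async def main():
        async def in_trio(msg: str) -> str:
            await trio.sleep(0)
            return f'handled {msg!r} on {threading.current_thread().name}'

        def sync_worker() -> str:
            # running in a bg thread; hop back into the trio thread to await
            return trio.from_thread.run(in_trio, 'debug request')

        print(await trio.to_thread.run_sync(sync_worker))

    trio.run(main)
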
+ if ( + not is_trio_thread + and + not asyncio_task + ): + # TODO: `threading.Lock()` this so we don't get races in + # multi-thr cases where they're acquiring/releasing the + # REPL and setting request/`Lock` state, etc.. + repl_owner: Thread = thread + + # TODO: make root-actor bg thread usage work! + if is_root: + message += ( + f'-> called from a root-actor bg {thread}\n' + ) + + message += ( + '-> scheduling `._pause_from_bg_root_thread()`..\n' + ) + # XXX SUBTLE BADNESS XXX that should really change! + # don't over-write the `repl` here since when + # this behalf-of-bg_thread-task calls pause it will + # pass `debug_func=None` which will result in it + # returing a `repl==None` output and that get's also + # `.started(out)` back here! So instead just ignore + # that output and assign the `repl` created above! + bg_task, _ = trio.from_thread.run( + afn=partial( + actor._service_tn.start, + partial( + _pause_from_bg_root_thread, + behalf_of_thread=thread, + repl=repl, + hide_tb=hide_tb, + **_pause_kwargs, + ), + ), + ) + DebugStatus.shield_sigint() + message += ( + f'-> `._pause_from_bg_root_thread()` started bg task {bg_task}\n' + ) + else: + message += f'-> called from a bg {thread}\n' + # NOTE: since this is a subactor, `._pause()` will + # internally issue a debug request via + # `request_root_stdio_lock()` and we don't need to + # worry about all the special considerations as with + # the root-actor per above. + bg_task, _ = trio.from_thread.run( + afn=partial( + _pause, + debug_func=None, + repl=repl, + hide_tb=hide_tb, + + # XXX to prevent `._pause()` for setting + # `DebugStatus.repl_task` to the gb task! + called_from_sync=True, + called_from_bg_thread=True, + + **_pause_kwargs + ), + ) + # ?TODO? XXX where do we NEED to call this in the + # subactor-bg-thread case? + DebugStatus.shield_sigint() + assert bg_task is not DebugStatus.repl_task + + # TODO: once supported, remove this AND the one + # inside `._pause()`! + # outstanding impl fixes: + # -[ ] need to make `.shield_sigint()` below work here! + # -[ ] how to handle `asyncio`'s new SIGINT-handler + # injection? + # -[ ] should `breakpoint()` work and what does it normally + # do in `asyncio` ctxs? + # if actor.is_infected_aio(): + # raise RuntimeError( + # '`tractor.pause[_from_sync]()` not yet supported ' + # 'for infected `asyncio` mode!' + # ) + # + # CASE: bg-thread running `asyncio.Task` + # ----- + elif ( + not is_trio_thread + and + is_infected_aio # as in, the special actor-runtime mode + # ^NOTE XXX, that doesn't mean the caller is necessarily + # an `asyncio.Task` just that `trio` has been embedded on + # the `asyncio` event loop! + and + asyncio_task # transitive caller is an actual `asyncio.Task` + ): + greenback: ModuleType = maybe_import_greenback() + + if greenback.has_portal(): + DebugStatus.shield_sigint() + fute: asyncio.Future = run_trio_task_in_future( + partial( + _pause, + debug_func=None, + repl=repl, + hide_tb=hide_tb, + + # XXX to prevent `._pause()` for setting + # `DebugStatus.repl_task` to the gb task! + called_from_sync=True, + called_from_bg_thread=True, + + **_pause_kwargs + ) + ) + repl_owner = asyncio_task + bg_task, _ = greenback.await_(fute) + # TODO: ASYNC version -> `.pause_from_aio()`? + # bg_task, _ = await fute + + # handle the case where an `asyncio` task has been + # spawned WITHOUT enabling a `greenback` portal.. + # => can often happen in 3rd party libs. 
+ else: + bg_task = repl_owner + + # TODO, ostensibly we can just acquire the + # debug lock directly presuming we're the + # root actor running in infected asyncio + # mode? + # + # TODO, this would be a special case where + # a `_pause_from_root()` would come in very + # handy! + # if is_root: + # import pdbp; pdbp.set_trace() + # log.warning( + # 'Allowing `asyncio` task to acquire debug-lock in root-actor..\n' + # 'This is not fully implemented yet; there may be teardown hangs!\n\n' + # ) + # else: + + # simply unsupported, since there exists no hack (i + # can think of) to workaround this in a subactor + # which needs to lock the root's REPL ow we're sure + # to get prompt stdstreams clobbering.. + cf_repr: str = '' + if api_frame: + caller_frame: FrameType = api_frame.f_back + cf_repr: str = f'caller_frame: {caller_frame!r}\n' + + raise RuntimeError( + f"CAN'T USE `greenback._await()` without a portal !?\n\n" + f'Likely this task was NOT spawned via the `tractor.to_asyncio` API..\n' + f'{asyncio_task}\n' + f'{cf_repr}\n' + + f'Prolly the task was started out-of-band (from some lib?)\n' + f'AND one of the below was never called ??\n' + f'- greenback.ensure_portal()\n' + f'- greenback.bestow_portal()\n' + ) + + # CASE: `trio.run()` + "main thread" + # ----- + else: + # raises on not-found by default + greenback: ModuleType = maybe_import_greenback() + + # TODO: how to ensure this is either dynamically (if + # needed) called here (in some bg tn??) or that the + # subactor always already called it? + # greenback: ModuleType = await maybe_init_greenback() + + message += f'-> imported {greenback}\n' + + # NOTE XXX seems to need to be set BEFORE the `_pause()` + # invoke using gb below? + DebugStatus.shield_sigint() + repl_owner: Task = current_task() + + message += '-> calling `greenback.await_(_pause(debug_func=None))` from sync caller..\n' + try: + out = greenback.await_( + _pause( + debug_func=None, + repl=repl, + hide_tb=hide_tb, + called_from_sync=True, + **_pause_kwargs, + ) + ) + except RuntimeError as rte: + if not _state._runtime_vars.get( + 'use_greenback', + False, + ): + raise InternalError( + f'`greenback` was never initialized in this actor?\n' + f'\n' + f'{ppfmt(_state._runtime_vars)}\n' + ) from rte + + raise + + if out: + bg_task, _ = out + else: + bg_task: Task = current_task() + + # assert repl is repl + # assert bg_task is repl_owner + if bg_task is not repl_owner: + raise DebugStateError( + f'The registered bg task for this debug request is NOT its owner ??\n' + f'bg_task: {bg_task}\n' + f'repl_owner: {repl_owner}\n\n' + + f'{DebugStatus.repr()}\n' + ) + + # NOTE: normally set inside `_enter_repl_sync()` + DebugStatus.repl_task: str = repl_owner + + # TODO: ensure we aggressively make the user aware about + # entering the global `breakpoint()` built-in from sync + # code? + message += ( + f'-> successfully scheduled `._pause()` in `trio` thread on behalf of {bg_task}\n' + f'-> Entering REPL via `tractor._set_trace()` from caller {repl_owner}\n' + ) + log.devx(message) + + # NOTE set as late as possible to avoid state clobbering + # in the multi-threaded case! + DebugStatus.repl = repl + + _set_trace( + api_frame=api_frame or inspect.currentframe(), + repl=repl, + hide_tb=hide_tb, + actor=actor, + task=repl_owner, + ) + # LEGACY NOTE on next LOC's frame showing weirdness.. + # + # XXX NOTE XXX no other LOC can be here without it + # showing up in the REPL's last stack frame !?! 
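
For the main-`trio`-thread case, `greenback` is what lets a plain sync frame drive `_pause()`: a portal is installed once per task with `ensure_portal()` and then `greenback.await_()` can run any awaitable from sync code. A minimal usage sketch, assuming `greenback` is installed:

    import trio
    import greenback

    def sync_helper() -> str:
        # plain sync code that still needs to run an awaitable
        greenback.await_(trio.sleep(0))
        return 'back in sync land'

    async def main():
        await greenback.ensure_portal()   # must run in this task before await_()
        print(sync_helper())

    trio.run(main)
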
+ # -[ ] tried to use `@pdbp.hideframe` decoration but + # still doesn't work + except BaseException as err: + log.exception( + 'Failed to sync-pause from\n\n' + f'{repl_owner}\n' + ) + __tracebackhide__: bool = False + raise err + + +def _sync_pause_from_builtin( + *args, + called_from_builtin=True, + **kwargs, +) -> None: + ''' + Proxy call `.pause_from_sync()` but indicate the caller is the + `breakpoint()` built-in. + + Note: this always assigned to `os.environ['PYTHONBREAKPOINT']` + inside `._root.open_root_actor()` whenever `debug_mode=True` is + set. + + ''' + pause_from_sync( + *args, + called_from_builtin=True, + api_frame=inspect.currentframe(), + **kwargs, + ) + + +# NOTE prefer a new "pause" semantic since it better describes +# "pausing the actor's runtime" for this particular +# paralell task to do debugging in a REPL. +async def breakpoint( + hide_tb: bool = True, + **kwargs, +): + log.warning( + '`tractor.breakpoint()` is deprecated!\n' + 'Please use `tractor.pause()` instead!\n' + ) + __tracebackhide__: bool = hide_tb + await pause( + api_frame=inspect.currentframe(), + **kwargs, + ) diff --git a/tractor/devx/debug/_tty_lock.py b/tractor/devx/debug/_tty_lock.py new file mode 100644 index 00000000..3f9576a3 --- /dev/null +++ b/tractor/devx/debug/_tty_lock.py @@ -0,0 +1,1239 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation, either version 3 of +# the License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public +# License along with this program. If not, see +# . + +''' +Root-actor TTY mutex-locking machinery. 
+ +''' +from __future__ import annotations +import asyncio +from contextlib import ( + AbstractContextManager, + asynccontextmanager as acm, + ExitStack, +) +import textwrap +import threading +import signal +from typing import ( + Any, + AsyncIterator, + Callable, + TypeAlias, + TYPE_CHECKING, +) +from types import ( + FrameType, +) + +from msgspec import Struct +import pdbp +import sniffio +import trio +from trio import CancelScope +from trio.lowlevel import ( + current_task, +) +from trio import ( + TaskStatus, +) +import tractor +from tractor.to_asyncio import run_trio_task_in_future +from tractor.log import get_logger +from tractor._context import Context +from tractor import _state +from tractor._exceptions import ( + DebugRequestError, + InternalError, +) +from tractor._state import ( + current_actor, + is_root_process, +) + +if TYPE_CHECKING: + from trio.lowlevel import Task + from threading import Thread + from tractor.ipc import ( + IPCServer, + ) + from tractor._runtime import ( + Actor, + ) + from ._repl import ( + PdbREPL, + ) + from ._post_mortem import ( + BoxedMaybeException, + ) + +log = get_logger(__name__) + + +class LockStatus( + Struct, + tag=True, + tag_field='msg_type', +): + subactor_uid: tuple[str, str] + cid: str + locked: bool + + +class LockRelease( + Struct, + tag=True, + tag_field='msg_type', +): + subactor_uid: tuple[str, str] + cid: str + + +__pld_spec__: TypeAlias = LockStatus|LockRelease + + +# TODO: instantiate this only in root from factory +# so as to allow runtime errors from subactors. +class Lock: + ''' + Actor-tree-global debug lock state, exists only in a root process. + + Mostly to avoid a lot of global declarations for now XD. + + ''' + @staticmethod + def get_locking_task_cs() -> CancelScope|None: + if not is_root_process(): + raise RuntimeError( + '`Lock.locking_task_cs` is invalid in subactors!' + ) + + if ctx := Lock.ctx_in_debug: + return ctx._scope + + return None + + # TODO: once we convert to singleton-per-actor-style + # @property + # def stats(cls) -> trio.LockStatistics: + # return cls._debug_lock.statistics() + + # @property + # def owner(cls) -> Task: + # return cls._debug_lock.statistics().owner + + # ROOT ONLY + # ------ - ------- + # the root-actor-ONLY singletons for, + # - the uid of the actor who's task is using a REPL + # - a literal task-lock, + # - a shielded-cancel-scope around the acquiring task*, + # - a broadcast event to signal no-actor using a REPL in tree, + # - a filter list to block subs-by-uid from locking. + # + # * in case it needs to be manually cancelled in root due to + # a stale lock condition (eg. 
IPC failure with the locking + # child + ctx_in_debug: Context|None = None + req_handler_finished: trio.Event|None = None + + _owned_by_root: bool = False + _debug_lock: trio.StrictFIFOLock = trio.StrictFIFOLock() + _blocked: set[ + tuple[str, str] # `Actor.uid` for per actor + |str # Context.cid for per task + ] = set() + + @classmethod + def repr(cls) -> str: + lock_stats: trio.LockStatistics = cls._debug_lock.statistics() + req: trio.Event|None = cls.req_handler_finished + fields: str = ( + f'|_ ._blocked: {cls._blocked}\n' + f'|_ ._debug_lock: {cls._debug_lock}\n' + f' {lock_stats}\n\n' + + f'|_ .ctx_in_debug: {cls.ctx_in_debug}\n' + f'|_ .req_handler_finished: {req}\n' + ) + if req: + req_stats: trio.EventStatistics = req.statistics() + fields += f' {req_stats}\n' + + body: str = textwrap.indent( + fields, + prefix=' ', + ) + return ( + f'<{cls.__name__}(\n' + f'{body}' + ')>\n\n' + ) + + @classmethod + # @pdbp.hideframe + def release( + cls, + raise_on_thread: bool = True, + + ) -> bool: + ''' + Release the actor-tree global TTY stdio lock (only) from the + `trio.run()`-main-thread. + + ''' + we_released: bool = False + ctx_in_debug: Context|None = cls.ctx_in_debug + repl_task: Task|Thread|None = DebugStatus.repl_task + try: + if not DebugStatus.is_main_trio_thread(): + thread: threading.Thread = threading.current_thread() + message: str = ( + '`Lock.release()` can not be called from a non-main-`trio` thread!\n' + f'{thread}\n' + ) + if raise_on_thread: + raise RuntimeError(message) + + log.devx(message) + return False + + task: Task = current_task() + message: str = ( + 'TTY NOT RELEASED on behalf of caller\n' + f'|_{task}\n' + ) + + # sanity check that if we're the root actor + # the lock is marked as such. + # note the pre-release value may be diff the the + # post-release task. + if repl_task is task: + assert cls._owned_by_root + message: str = ( + 'TTY lock held by root-actor on behalf of local task\n' + f'|_{repl_task}\n' + ) + else: + assert DebugStatus.repl_task is not task + + lock: trio.StrictFIFOLock = cls._debug_lock + owner: Task = lock.statistics().owner + if ( + lock.locked() + and + (owner is task) + # ^-NOTE-^ if we do NOT ensure this, `trio` will + # raise a RTE when a non-owner tries to releasee the + # lock. + # + # Further we need to be extra pedantic about the + # correct task, greenback-spawned-task and/or thread + # being set to the `.repl_task` such that the above + # condition matches and we actually release the lock. + # + # This is particular of note from `.pause_from_sync()`! + ): + cls._debug_lock.release() + we_released: bool = True + if repl_task: + message: str = ( + 'TTY released on behalf of root-actor-local REPL owner\n' + f'|_{repl_task}\n' + ) + else: + message: str = ( + 'TTY released by us on behalf of remote peer?\n' + f'{ctx_in_debug}\n' + ) + + except RuntimeError as rte: + log.exception( + 'Failed to release `Lock._debug_lock: trio.FIFOLock`?\n' + ) + raise rte + + finally: + # IFF there are no more requesting tasks queued up fire, the + # "tty-unlocked" event thereby alerting any monitors of the lock that + # we are now back in the "tty unlocked" state. This is basically + # and edge triggered signal around an empty queue of sub-actor + # tasks that may have tried to acquire the lock. 
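
The ownership checks above matter because `trio` only lets the task that acquired a `Lock`/`StrictFIFOLock` release it, and `Lock.statistics().owner` is how you introspect who that is; a small demo of both behaviours:

    import trio

    async def main():
        lock = trio.StrictFIFOLock()
        await lock.acquire()
        assert lock.statistics().owner is trio.lowlevel.current_task()

        async def not_the_owner():
            try:
                lock.release()             # trio refuses: we never acquired it
            except RuntimeError as rte:
                print('release refused:', rte)

        async with trio.open_nursery() as tn:
            tn.start_soon(not_the_owner)

        lock.release()                     # the owner may release

    trio.run(main)
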
+ lock_stats: trio.LockStatistics = cls._debug_lock.statistics() + req_handler_finished: trio.Event|None = Lock.req_handler_finished + if ( + not lock_stats.owner + and + req_handler_finished is None + ): + message += ( + '-> No new task holds the TTY lock!\n\n' + f'{Lock.repr()}\n' + ) + + elif ( + req_handler_finished # new IPC ctx debug request active + and + lock.locked() # someone has the lock + ): + behalf_of_task = ( + ctx_in_debug + or + repl_task + ) + message += ( + f'A non-caller task still owns this lock on behalf of\n' + f'{behalf_of_task}\n' + f'lock owner task: {lock_stats.owner}\n' + ) + + if ( + we_released + and + ctx_in_debug + ): + cls.ctx_in_debug = None # unset + + # post-release value (should be diff then value above!) + repl_task: Task|Thread|None = DebugStatus.repl_task + if ( + cls._owned_by_root + and + we_released + ): + cls._owned_by_root = False + + if task is not repl_task: + message += ( + 'Lock released by root actor on behalf of bg thread\n' + f'|_{repl_task}\n' + ) + + if message: + log.devx(message) + + return we_released + + @classmethod + @acm + async def acquire_for_ctx( + cls, + ctx: Context, + + ) -> AsyncIterator[trio.StrictFIFOLock]: + ''' + Acquire a root-actor local FIFO lock which tracks mutex access of + the process tree's global debugger breakpoint. + + This lock avoids tty clobbering (by preventing multiple processes + reading from stdstreams) and ensures multi-actor, sequential access + to the ``pdb`` repl. + + ''' + if not is_root_process(): + raise RuntimeError('Only callable by a root actor task!') + + # subactor_uid: tuple[str, str] = ctx.chan.uid + we_acquired: bool = False + log.runtime( + f'Attempting to acquire TTY lock for sub-actor\n' + f'{ctx}' + ) + try: + pre_msg: str = ( + f'Entering lock checkpoint for sub-actor\n' + f'{ctx}' + ) + stats = cls._debug_lock.statistics() + if owner := stats.owner: + pre_msg += ( + f'\n' + f'`Lock` already held by local task?\n' + f'{owner}\n\n' + # f'On behalf of task: {cls.remote_task_in_debug!r}\n' + f'On behalf of IPC ctx\n' + f'{ctx}' + ) + log.runtime(pre_msg) + + # NOTE: if the surrounding cancel scope from the + # `lock_stdio_for_peer()` caller is cancelled, this line should + # unblock and NOT leave us in some kind of + # a "child-locked-TTY-but-child-is-uncontactable-over-IPC" + # condition. + await cls._debug_lock.acquire() + cls.ctx_in_debug = ctx + we_acquired = True + + log.runtime( + f'TTY lock acquired for sub-actor\n' + f'{ctx}' + ) + + # NOTE: critical section: this yield is unshielded! + # + # IF we received a cancel during the shielded lock entry of some + # next-in-queue requesting task, then the resumption here will + # result in that ``trio.Cancelled`` being raised to our caller + # (likely from `lock_stdio_for_peer()` below)! In + # this case the ``finally:`` below should trigger and the + # surrounding caller side context should cancel normally + # relaying back to the caller. 
+ + yield cls._debug_lock + + finally: + message :str = 'Exiting `Lock.acquire_for_ctx()` on behalf of sub-actor\n' + if we_acquired: + cls.release() + message += '-> TTY lock released by child\n' + + else: + message += '-> TTY lock never acquired by child??\n' + + log.runtime( + f'{message}\n' + f'{ctx}' + ) + + +def get_lock() -> Lock: + return Lock + + +@tractor.context( + # enable the locking msgspec + pld_spec=__pld_spec__, +) +async def lock_stdio_for_peer( + ctx: Context, + subactor_task_uid: tuple[str, int], + +) -> LockStatus|LockRelease: + ''' + Lock the TTY in the root process of an actor tree in a new + inter-actor-context-task such that the ``pdbp`` debugger console + can be mutex-allocated to the calling sub-actor for REPL control + without interference by other processes / threads. + + NOTE: this task must be invoked in the root process of the actor + tree. It is meant to be invoked as an rpc-task and should be + highly reliable at releasing the mutex complete! + + ''' + subactor_uid: tuple[str, str] = ctx.chan.uid + + # mark the tty lock as being in use so that the runtime + # can try to avoid clobbering any connection from a child + # that's currently relying on it. + we_finished = Lock.req_handler_finished = trio.Event() + lock_blocked: bool = False + try: + if ctx.cid in Lock._blocked: + raise RuntimeError( + f'Double lock request!?\n' + f'The same remote task already has an active request for TTY lock ??\n\n' + f'subactor uid: {subactor_uid}\n\n' + + 'This might be mean that the requesting task ' + 'in `request_root_stdio_lock()` may have crashed?\n' + 'Consider that an internal bug exists given the TTY ' + '`Lock`ing IPC dialog..\n' + ) + Lock._blocked.add(ctx.cid) + lock_blocked = True + root_task_name: str = current_task().name + if tuple(subactor_uid) in Lock._blocked: + log.warning( + f'Subactor is blocked from acquiring debug lock..\n' + f'subactor_uid: {subactor_uid}\n' + f'remote task: {subactor_task_uid}\n' + ) + ctx._enter_debugger_on_cancel: bool = False + message: str = ( + f'Debug lock blocked for subactor\n\n' + f'x)<= {subactor_uid}\n\n' + + f'Likely because the root actor already started shutdown and is ' + 'closing IPC connections for this child!\n\n' + 'Cancelling debug request!\n' + ) + log.cancel(message) + await ctx.cancel() + raise DebugRequestError(message) + + log.devx( + 'Subactor attempting to acquire TTY lock\n' + f'root task: {root_task_name}\n' + f'subactor_uid: {subactor_uid}\n' + f'remote task: {subactor_task_uid}\n' + ) + DebugStatus.shield_sigint() + + # NOTE: we use the IPC ctx's cancel scope directly in order to + # ensure that on any transport failure, or cancellation request + # from the child we expect + # `Context._maybe_cancel_and_set_remote_error()` to cancel this + # scope despite the shielding we apply below. 
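
Stripped of the IPC bookkeeping, `Lock.acquire_for_ctx()` is the classic acquire-then-`yield`-then-release async context manager; a bare-bones sketch of that shape:

    from contextlib import asynccontextmanager
    import trio

    _debug_lock = trio.StrictFIFOLock()

    @asynccontextmanager
    async def acquire_tty():
        await _debug_lock.acquire()
        try:
            yield _debug_lock          # NOTE: unshielded, just like above
        finally:
            _debug_lock.release()

    async def main():
        async with acquire_tty():
            print('exclusive stdio access here')

    trio.run(main)
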
+ debug_lock_cs: CancelScope = ctx._scope + + async with Lock.acquire_for_ctx(ctx=ctx): + debug_lock_cs.shield = True + + log.devx( + 'Subactor acquired debugger request lock!\n' + f'root task: {root_task_name}\n' + f'subactor_uid: {subactor_uid}\n' + f'remote task: {subactor_task_uid}\n\n' + + 'Sending `ctx.started(LockStatus)`..\n' + + ) + + # indicate to child that we've locked stdio + await ctx.started( + LockStatus( + subactor_uid=subactor_uid, + cid=ctx.cid, + locked=True, + ) + ) + + log.devx( + f'Actor {subactor_uid} acquired `Lock` via debugger request' + ) + + # wait for unlock pdb by child + async with ctx.open_stream() as stream: + release_msg: LockRelease = await stream.receive() + + # TODO: security around only releasing if + # these match? + log.devx( + f'TTY lock released requested\n\n' + f'{release_msg}\n' + ) + assert release_msg.cid == ctx.cid + assert release_msg.subactor_uid == tuple(subactor_uid) + + log.devx( + f'Actor {subactor_uid} released TTY lock' + ) + + return LockStatus( + subactor_uid=subactor_uid, + cid=ctx.cid, + locked=False, + ) + + except BaseException as req_err: + fail_reason: str = ( + f'on behalf of peer\n\n' + f'x)<=\n' + f' |_{subactor_task_uid!r}@{ctx.chan.uid!r}\n' + f'\n' + 'Forcing `Lock.release()` due to acquire failure!\n\n' + f'x)=>\n' + f' {ctx}' + ) + if isinstance(req_err, trio.Cancelled): + fail_reason = ( + 'Cancelled during stdio-mutex request ' + + + fail_reason + ) + else: + fail_reason = ( + 'Failed to deliver stdio-mutex request ' + + + fail_reason + ) + + log.exception(fail_reason) + Lock.release() + raise + + finally: + if lock_blocked: + Lock._blocked.remove(ctx.cid) + + # wakeup any waiters since the lock was (presumably) + # released, possibly only temporarily. + we_finished.set() + DebugStatus.unshield_sigint() + + +class DebugStateError(InternalError): + ''' + Something inconsistent or unexpected happend with a sub-actor's + debug mutex request to the root actor. + + ''' + + +# TODO: rename to ReplState or somethin? +# DebugRequest, make it a singleton instance? +class DebugStatus: + ''' + Singleton-state for debugging machinery in a subactor. + + Composes conc primitives for syncing with a root actor to + acquire the tree-global (TTY) `Lock` such that only ever one + actor's task can have the REPL active at a given time. + + Methods to shield the process' `SIGINT` handler are used + whenever a local task is an active REPL. + + ''' + # XXX local ref to the `pdbp.Pbp` instance, ONLY set in the + # actor-process that currently has activated a REPL i.e. it + # should be `None` (unset) in any other actor-process that does + # not yet have the `Lock` acquired via a root-actor debugger + # request. + repl: PdbREPL|None = None + + # any `repl_fixture` provided by user are entered and + # latered closed on `.release()` + _fixture_stack = ExitStack() + + # TODO: yet again this looks like a task outcome where we need + # to sync to the completion of one task (and get its result) + # being used everywhere for syncing.. + # -[ ] see if we can get our proto oco task-mngr to work for + # this? + repl_task: Task|None = None + # repl_thread: Thread|None = None + # ^TODO? 
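# ------ msg-shape sketch: the lock-dialog payloads ------
# The `LockStatus`/`LockRelease` payloads exchanged by
# `lock_stdio_for_peer()` above are `msgspec` structs defined elsewhere
# in `tractor`; the field names below are read off their usage here but
# the real definitions (tagging, config, extra fields) may differ, so
# treat this purely as an illustrative msgpack round-trip.
import msgspec


class LockStatus(msgspec.Struct, tag=True):
    subactor_uid: tuple[str, str]
    cid: str
    locked: bool


class LockRelease(msgspec.Struct, tag=True):
    subactor_uid: tuple[str, str]
    cid: str


wire: bytes = msgspec.msgpack.encode(
    LockStatus(
        subactor_uid=('child', 'some-uuid'),
        cid='123',
        locked=True,
    )
)
msg = msgspec.msgpack.decode(
    wire,
    type=LockStatus | LockRelease,
)
assert isinstance(msg, LockStatus) and msg.locked
# ------ end sketch ------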
+ + repl_release: trio.Event|None = None + + req_task: Task|None = None + req_ctx: Context|None = None + req_cs: CancelScope|None = None + req_finished: trio.Event|None = None + req_err: BaseException|None = None + + lock_status: LockStatus|None = None + + _orig_sigint_handler: Callable|None = None + _trio_handler: ( + Callable[[int, FrameType|None], Any] + |int + | None + ) = None + + @classmethod + def repr(cls) -> str: + fields: str = ( + f'repl: {cls.repl}\n' + f'repl_task: {cls.repl_task}\n' + f'repl_release: {cls.repl_release}\n' + f'req_ctx: {cls.req_ctx}\n' + ) + body: str = textwrap.indent( + fields, + prefix=' |_', + ) + return ( + f'<{cls.__name__}(\n' + f'{body}' + ')>' + ) + + # TODO: how do you get this to work on a non-inited class? + # __repr__ = classmethod(repr) + # __str__ = classmethod(repr) + + @classmethod + def shield_sigint(cls): + ''' + Shield out SIGINT handling (which by default triggers + `Task` cancellation) in subactors when a `pdb` REPL + is active. + + Avoids cancellation of the current actor (task) when the user + mistakenly sends ctl-c or via a recevied signal (from an + external request). Explicit runtime cancel requests are + allowed until the current REPL-session (the blocking call + `Pdb.interaction()`) exits, normally via the 'continue' or + 'quit' command - at which point the orig SIGINT handler is + restored via `.unshield_sigint()` below. + + Impl notes: + ----------- + - we prefer that `trio`'s default handler is always used when + SIGINT is unshielded (hence disabling the `pdb.Pdb` + defaults in `mk_pdb()`) such that reliable KBI cancellation + is always enforced. + + - we always detect whether we're running from a non-main + thread, in which case schedule the SIGINT shielding override + to in the main thread as per, + + https://docs.python.org/3/library/signal.html#signals-and-threads + + ''' + from ._sigint import ( + sigint_shield, + ) + # + # XXX detect whether we're running from a non-main thread + # in which case schedule the SIGINT shielding override + # to in the main thread. + # https://docs.python.org/3/library/signal.html#signals-and-threads + if ( + not cls.is_main_trio_thread() + and + not _state._runtime_vars.get( + '_is_infected_aio', + False, + ) + ): + cls._orig_sigint_handler: Callable = trio.from_thread.run_sync( + signal.signal, + signal.SIGINT, + sigint_shield, + ) + + else: + cls._orig_sigint_handler = signal.signal( + signal.SIGINT, + sigint_shield, + ) + + @classmethod + @pdbp.hideframe # XXX NOTE XXX see below in `.pause_from_sync()` + def unshield_sigint(cls): + ''' + Un-shield SIGINT for REPL-active (su)bactor. + + See details in `.shield_sigint()`. + + ''' + # always restore ``trio``'s sigint handler. see notes below in + # the pdb factory about the nightmare that is that code swapping + # out the handler when the repl activates... + # if not cls.is_main_trio_thread(): + if ( + not cls.is_main_trio_thread() + and + not _state._runtime_vars.get( + '_is_infected_aio', + False, + ) + # not current_actor().is_infected_aio() + # ^XXX, since for bg-thr case will always raise.. 
+ ): + trio.from_thread.run_sync( + signal.signal, + signal.SIGINT, + cls._trio_handler, + ) + else: + trio_h: Callable = cls._trio_handler + # XXX should never really happen XXX + if not trio_h: + from ._repl import mk_pdb + mk_pdb().set_trace() + + signal.signal( + signal.SIGINT, + cls._trio_handler, + ) + + cls._orig_sigint_handler = None + + @classmethod + def is_main_trio_thread(cls) -> bool: + ''' + Check if we're the "main" thread (as in the first one + started by cpython) AND that it is ALSO the thread that + called `trio.run()` and not some thread spawned with + `trio.to_thread.run_sync()`. + + ''' + try: + async_lib: str = sniffio.current_async_library() + except sniffio.AsyncLibraryNotFoundError: + async_lib = None + + is_main_thread: bool = trio._util.is_main_thread() + # ^TODO, since this is private, @oremanj says + # we should just copy the impl for now..? + if is_main_thread: + thread_name: str = 'main' + else: + thread_name: str = threading.current_thread().name + + is_trio_main = ( + is_main_thread + and + (async_lib == 'trio') + ) + + report: str = f'Running thread: {thread_name!r}\n' + if async_lib: + report += ( + f'Current async-lib detected by `sniffio`: {async_lib}\n' + ) + else: + report += ( + 'No async-lib detected (by `sniffio`) ??\n' + ) + if not is_trio_main: + log.warning(report) + + return is_trio_main + # XXX apparently unreliable..see ^ + # ( + # threading.current_thread() + # is not threading.main_thread() + # ) + + @classmethod + def cancel(cls) -> bool: + if (req_cs := cls.req_cs): + req_cs.cancel() + return True + + return False + + # TODO, support @acm? + # -[ ] what about a return-proto for determining + # whether the REPL should be allowed to enage? + # -[x] consider factoring this `_repl_fixture` block into + # a common @cm somehow so it can be repurposed both here and + # in `._pause()`?? + # -[ ] we could also use the `ContextDecorator`-type in that + # case to simply decorate the `_enter_repl_sync()` closure? + # |_https://docs.python.org/3/library/contextlib.html#using-a-context-manager-as-a-function-decorator + @classmethod + def maybe_enter_repl_fixture( + cls, + # ^XXX **always provided** by the low-level REPL-invoker, + # - _post_mortem() + # - _pause() + repl: PdbREPL, + + # maybe pre/post REPL entry + repl_fixture: ( + AbstractContextManager[bool] + |None + ) = None, + + # if called from crashed context, provided by + # `open_crash_handler()` + boxed_maybe_exc: BoxedMaybeException|None = None, + ) -> bool: + ''' + Maybe open a pre/post REPL entry "fixture" `@cm` provided by the + user, the caller should use the delivered `bool` to determine + whether to engage the `PdbREPL`. 
+ + ''' + if not ( + (rt_repl_fixture := _state._runtime_vars.get('repl_fixture')) + or + repl_fixture + ): + return True # YES always enter + + _repl_fixture = ( + repl_fixture + or + rt_repl_fixture + ) + enter_repl: bool = DebugStatus._fixture_stack.enter_context( + _repl_fixture( + repl=repl, + maybe_bxerr=boxed_maybe_exc, + ) + ) + if not enter_repl: + log.pdb( + f'pdbp-REPL blocked by a `repl_fixture()` which yielded `False` !\n' + f'repl_fixture: {repl_fixture}\n' + f'rt_repl_fixture: {rt_repl_fixture}\n' + ) + + log.devx( + f'User provided `repl_fixture` entered with,\n' + f'{repl_fixture!r} -> {enter_repl!r}\n' + ) + return enter_repl + + @classmethod + # @pdbp.hideframe + def release( + cls, + cancel_req_task: bool = False, + ): + repl_release: trio.Event = cls.repl_release + try: + # sometimes the task might already be terminated in + # which case this call will raise an RTE? + # See below for reporting on that.. + if ( + repl_release is not None + and + not repl_release.is_set() + ): + if cls.is_main_trio_thread(): + repl_release.set() + + elif ( + _state._runtime_vars.get( + '_is_infected_aio', + False, + ) + # ^XXX, again bc we need to not except + # but for bg-thread case it will always raise.. + # + # TODO, is there a better api then using + # `err_on_no_runtime=False` in the below? + # current_actor().is_infected_aio() + ): + async def _set_repl_release(): + repl_release.set() + + fute: asyncio.Future = run_trio_task_in_future( + _set_repl_release + ) + if not fute.done(): + log.warning('REPL release state unknown..?') + + else: + # XXX NOTE ONLY used for bg root-actor sync + # threads, see `.pause_from_sync()`. + trio.from_thread.run_sync( + repl_release.set + ) + + except RuntimeError as rte: + log.exception( + f'Failed to release debug-request ??\n\n' + f'{cls.repr()}\n' + ) + # pdbp.set_trace() + raise rte + + finally: + # if req_ctx := cls.req_ctx: + # req_ctx._scope.cancel() + if cancel_req_task: + cancelled: bool = cls.cancel() + if not cancelled: + log.warning( + 'Failed to cancel request task!?\n' + f'{cls.repl_task}\n' + ) + + # actor-local state, irrelevant for non-root. + cls.repl_task = None + + # XXX WARNING needs very special caughtion, and we should + # prolly make a more explicit `@property` API? + # + # - if unset in root multi-threaded case can cause + # issues with detecting that some root thread is + # using a REPL, + # + # - what benefit is there to unsetting, it's always + # set again for the next task in some actor.. + # only thing would be to avoid in the sigint-handler + # logging when we don't need to? + cls.repl = None + + # maybe restore original sigint handler + # XXX requires runtime check to avoid crash! + if current_actor(err_on_no_runtime=False): + cls.unshield_sigint() + + cls._fixture_stack.close() + + +# TODO: use the new `@lowlevel.singleton` for this! +def get_debug_req() -> DebugStatus|None: + return DebugStatus + + +async def request_root_stdio_lock( + actor_uid: tuple[str, str], + task_uid: tuple[str, int], + + shield: bool = False, + task_status: TaskStatus[CancelScope] = trio.TASK_STATUS_IGNORED, +): + ''' + Connect to the root actor for this actor's process tree and + RPC-invoke a task which acquires the std-streams global `Lock`: + a process-tree-global mutex which prevents multiple actors from + entering `PdbREPL.interaction()` at the same time such that the + parent TTY's stdio is never "clobbered" by simultaneous + reads/writes. 
+ + The actual `Lock` singleton instance exists ONLY in the root + actor's memory space and does nothing more then manage + process-tree global state, + namely a `._debug_lock: trio.FIFOLock`. + + The actual `PdbREPL` interaction/operation is completely isolated + to each sub-actor (process) with the root's `Lock` providing the + multi-process mutex-syncing mechanism to avoid parallel REPL + usage within an actor tree. + + ''' + log.devx( + 'Initing stdio-lock request task with root actor' + ) + # TODO: can we implement this mutex more generally as + # a `._sync.Lock`? + # -[ ] simply add the wrapping needed for the debugger specifics? + # - the `__pld_spec__` impl and maybe better APIs for the client + # vs. server side state tracking? (`Lock` + `DebugStatus`) + # -[ ] for eg. `mp` has a multi-proc lock via the manager + # - https://docs.python.org/3.8/library/multiprocessing.html#synchronization-primitives + # -[ ] technically we need a `RLock` since re-acquire should be a noop + # - https://docs.python.org/3.8/library/multiprocessing.html#multiprocessing.RLock + DebugStatus.req_finished = trio.Event() + DebugStatus.req_task = current_task() + req_err: BaseException|None = None + try: + from tractor._discovery import get_root + # NOTE: we need this to ensure that this task exits + # BEFORE the REPl instance raises an error like + # `bdb.BdbQuit` directly, OW you get a trio cs stack + # corruption! + # Further, the since this task is spawned inside the + # `Context._scope_nursery: trio.Nursery`, once an RPC + # task errors that cs is cancel_called and so if we want + # to debug the TPC task that failed we need to shield + # against that expected `.cancel()` call and instead + # expect all of the `PdbREPL`.set_[continue/quit/]()` + # methods to unblock this task by setting the + # `.repl_release: # trio.Event`. + with trio.CancelScope(shield=shield) as req_cs: + # XXX: was orig for debugging cs stack corruption.. + # log.devx( + # 'Request cancel-scope is:\n\n' + # f'{pformat_cs(req_cs, var_name="req_cs")}\n\n' + # ) + DebugStatus.req_cs = req_cs + req_ctx: Context|None = None + ctx_eg: BaseExceptionGroup|None = None + try: + # TODO: merge into single async with ? + async with get_root() as portal: + async with portal.open_context( + lock_stdio_for_peer, + subactor_task_uid=task_uid, + + # NOTE: set it here in the locker request task bc it's + # possible for multiple such requests for the lock in any + # single sub-actor AND there will be a race between when the + # root locking task delivers the `Started(pld=LockStatus)` + # and when the REPL is actually entered by the requesting + # application task who called + # `.pause()`/`.post_mortem()`. + # + # SO, applying the pld-spec here means it is only applied to + # this IPC-ctx request task, NOT any other task(s) + # including the one that actually enters the REPL. This + # is oc desired bc ow the debugged task will msg-type-error. + # pld_spec=__pld_spec__, + + ) as (req_ctx, status): + + DebugStatus.req_ctx = req_ctx + log.devx( + 'Subactor locked TTY with msg\n\n' + f'{status}\n' + ) + + # try: + if (locker := status.subactor_uid) != actor_uid: + raise DebugStateError( + f'Root actor locked by another peer !?\n' + f'locker: {locker!r}\n' + f'actor_uid: {actor_uid}\n' + ) + assert status.cid + # except AttributeError: + # log.exception('failed pldspec asserts!') + # mk_pdb().set_trace() + # raise + + # set last rxed lock dialog status. 
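# ------ usage sketch: `nursery.start()` handle hand-off ------
# `request_root_stdio_lock()` is spawned via `nursery.start()` and hands
# its request handle back through `task_status.started()`; a minimal
# pure-`trio` sketch of that "start then report a handle" pattern, here
# reporting the task's own `CancelScope` (analogous to
# `DebugStatus.req_cs`). Names are illustrative only.
import trio


async def _requester(
    task_status: trio.TaskStatus = trio.TASK_STATUS_IGNORED,
) -> None:
    with trio.CancelScope() as cs:
        # hand the scope to whoever `.start()`ed us so they can
        # cancel this request later, then park until that happens.
        task_status.started(cs)
        await trio.sleep_forever()


async def _demo_start_handoff() -> None:
    async with trio.open_nursery() as tn:
        req_cs: trio.CancelScope = await tn.start(_requester)
        # ..later, the "request" is no longer needed..
        req_cs.cancel()


if __name__ == '__main__':
    trio.run(_demo_start_handoff)
# ------ end sketch ------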
+ DebugStatus.lock_status = status + + async with req_ctx.open_stream() as stream: + task_status.started(req_ctx) + + # wait for local task to exit + # `PdbREPL.interaction()`, normally via + # a `DebugStatus.release()`call, and + # then unblock us here. + await DebugStatus.repl_release.wait() + await stream.send( + LockRelease( + subactor_uid=actor_uid, + cid=status.cid, + ) + ) + + # sync with child-side root locker task + # completion + status: LockStatus = await req_ctx.result() + assert not status.locked + DebugStatus.lock_status = status + + log.devx( + 'TTY lock was released for subactor with msg\n\n' + f'{status}\n\n' + f'Exitting {req_ctx.side!r}-side of locking req_ctx\n' + ) + + except* ( + tractor.ContextCancelled, + trio.Cancelled, + ) as _taskc_eg: + ctx_eg = _taskc_eg + log.cancel( + 'Debug lock request was CANCELLED?\n\n' + f'<=c) {req_ctx}\n' + # f'{pformat_cs(req_cs, var_name="req_cs")}\n\n' + # f'{pformat_cs(req_ctx._scope, var_name="req_ctx._scope")}\n\n' + ) + raise + + except* ( + BaseException, + ) as _ctx_eg: + ctx_eg = _ctx_eg + message: str = ( + 'Failed during debug request dialog with root actor?\n' + ) + if (req_ctx := DebugStatus.req_ctx): + message += ( + f'<=x)\n' + f' |_{req_ctx}\n' + f'Cancelling IPC ctx!\n' + ) + try: + await req_ctx.cancel() + except trio.ClosedResourceError as terr: + ctx_eg.add_note( + # f'Failed with {type(terr)!r} x)> `req_ctx.cancel()` ' + f'Failed with `req_ctx.cancel()` bool: + ''' + Predicate to determine if a reported child subactor in debug + is actually connected. + + Useful to detect stale `Lock` requests after IPC failure. + + ''' + actor: Actor = current_actor() + server: IPCServer = actor.ipc_server + + if not is_root_process(): + raise InternalError('This is a root-actor only API!') + + if ( + (ctx := Lock.ctx_in_debug) + and + (uid_in_debug := ctx.chan.uid) + ): + chans: list[tractor.Channel] = server._peers.get( + tuple(uid_in_debug) + ) + if chans: + return any( + chan.connected() + for chan in chans + ) + + return False diff --git a/tractor/devx/pformat.py b/tractor/devx/pformat.py index 1530ef02..38b942ff 100644 --- a/tractor/devx/pformat.py +++ b/tractor/devx/pformat.py @@ -15,10 +15,13 @@ # along with this program. If not, see . ''' -Pretty formatters for use throughout the code base. -Mostly handy for logging and exception message content. +Pretty formatters for use throughout our internals. + +Handy for logging and exception message content but also for `repr()` +in REPL(s). ''' +import sys import textwrap import traceback @@ -115,6 +118,85 @@ def pformat_boxed_tb( ) +def pformat_exc( + exc: Exception, + header: str = '', + message: str = '', + body: str = '', + with_type_header: bool = True, +) -> str: + + # XXX when the currently raised exception is this instance, + # we do not ever use the "type header" style repr. + is_being_raised: bool = False + if ( + (curr_exc := sys.exception()) + and + curr_exc is exc + ): + is_being_raised: bool = True + + with_type_header: bool = ( + with_type_header + and + not is_being_raised + ) + + # style + if ( + with_type_header + and + not header + ): + header: str = f'<{type(exc).__name__}(' + + message: str = ( + message + or + exc.message + ) + if message: + # split off the first line so, if needed, it isn't + # indented the same like the "boxed content" which + # since there is no `.tb_str` is just the `.message`. 
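# ------ usage sketch: `except*` group splitting + `.add_note()` ------
# The `except*` clauses above split a `BaseExceptionGroup` raised out of
# the nested ctx managers, and `.add_note()` is used to annotate the
# group when the follow-up `req_ctx.cancel()` also fails; a stdlib-only
# (py>=3.11) sketch of both, with made-up exception types.
def _demo_except_star() -> None:
    try:
        raise ExceptionGroup(
            'nested ctx teardown failures',
            [
                ValueError('ctx error'),
                RuntimeError('transport closed'),
            ],
        )
    except* ValueError as eg:
        # only the matching sub-exceptions land here; the rest keep
        # propagating as a (smaller) group to later `except*` clauses.
        eg.add_note('handled the ctx-error subset')
        print(f'caught: {eg.exceptions!r}')
    except* RuntimeError as eg:
        print(f'remaining subgroup: {eg.exceptions!r}')


_demo_except_star()
# ------ end sketch ------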
+ lines: list[str] = message.splitlines() + first: str = lines[0] + message: str = message.removeprefix(first) + + # with a type-style header we, + # - have no special message "first line" extraction/handling + # - place the message a space in from the header: + # `MsgTypeError( ..` + # ^-here + # - indent the `.message` inside the type body. + if with_type_header: + first = f' {first} )>' + + message: str = textwrap.indent( + message, + prefix=' '*2, + ) + message: str = first + message + + tail: str = '' + if ( + with_type_header + and + not message + ): + tail: str = '>' + + return ( + header + + + message + + + f'{body}' + + + tail + ) + + def pformat_caller_frame( stack_limit: int = 1, box_tb: bool = True, @@ -144,8 +226,8 @@ def pformat_cs( field_prefix: str = ' |_', ) -> str: ''' - Pretty format info about a `trio.CancelScope` including most - of its public state and `._cancel_status`. + Pretty format info about a `trio.CancelScope` including most of + its public state and `._cancel_status`. The output can be modified to show a "var name" for the instance as a field prefix, just a simple str before each @@ -167,3 +249,279 @@ def pformat_cs( + fields ) + + +def nest_from_op( + input_op: str, # TODO, Literal of all op-"symbols" from below? + text: str, + prefix_op: bool = True, # unset is to suffix the first line + # optionally suffix `text`, by def on a newline + op_suffix='\n', + + nest_prefix: str = '|_', + nest_indent: int|None = None, + # XXX indent `next_prefix` "to-the-right-of" `input_op` + # by this count of whitespaces (' '). + rm_from_first_ln: str|None = None, + +) -> str: + ''' + Depth-increment the input (presumably hierarchy/supervision) + input "tree string" below the provided `input_op` execution + operator, so injecting a `"\n|_{input_op}\n"`and indenting the + `tree_str` to nest content aligned with the ops last char. + + ''' + # `sclang` "structurred-concurrency-language": an ascii-encoded + # symbolic alphabet to describe concurrent systems. + # + # ?TODO? aa more fomal idea for a syntax to the state of + # concurrent systems as a "3-domain" (execution, scope, storage) + # model and using a minimal ascii/utf-8 operator-set. + # + # try not to take any of this seriously yet XD + # + # > is a "play operator" indicating (CPU bound) + # exec/work/ops required at the "lowest level computing" + # + # execution primititves (tasks, threads, actors..) denote their + # lifetime with '(' and ')' since parentheses normally are used + # in many langs to denote function calls. + # + # starting = ( + # >( opening/starting; beginning of the thread-of-exec (toe?) + # (> opened/started, (finished spawning toe) + # |_ repr of toe, in py these look like + # + # >) closing/exiting/stopping, + # )> closed/exited/stopped, + # |_ + # [OR <), )< ?? ] + # + # ending = ) + # >c) cancelling to close/exit + # c)> cancelled (caused close), OR? + # |_ + # OR maybe "x) erroring to eventuall exit + # x)> errored and terminated + # |_ + # + # scopes: supers/nurseries, IPC-ctxs, sessions, perms, etc. + # >{ opening + # {> opened + # }> closed + # >} closing + # + # storage: like queues, shm-buffers, files, etc.. + # >[ opening + # [> opened + # |_ + # + # >] closing + # ]> closed + + # IPC ops: channels, transports, msging + # => req msg + # <= resp msg + # <=> 2-way streaming (of msgs) + # <- recv 1 msg + # -> send 1 msg + # + # TODO: still not sure on R/L-HS approach..? + # =>( send-req to exec start (task, actor, thread..) 
+ # (<= recv-req to ^ + # + # (<= recv-req ^ + # <=( recv-resp opened remote exec primitive + # <=) recv-resp closed + # + # )<=c req to stop due to cancel + # c=>) req to stop due to cancel + # + # =>{ recv-req to open + # <={ send-status that it closed + # + if ( + nest_prefix + and + nest_indent != 0 + ): + if nest_indent is not None: + nest_prefix: str = textwrap.indent( + nest_prefix, + prefix=nest_indent*' ', + ) + nest_indent: int = len(nest_prefix) + + # determine body-text indent either by, + # - using wtv explicit indent value is provided, + # OR + # - auto-calcing the indent to embed `text` under + # the `nest_prefix` if provided, **IFF** `nest_indent=None`. + tree_str_indent: int = 0 + if nest_indent not in {0, None}: + tree_str_indent = nest_indent + elif ( + nest_prefix + and + nest_indent != 0 + ): + tree_str_indent = len(nest_prefix) + + indented_tree_str: str = text + if tree_str_indent: + indented_tree_str: str = textwrap.indent( + text, + prefix=' '*tree_str_indent, + ) + + # inject any provided nesting-prefix chars + # into the head of the first line. + if nest_prefix: + indented_tree_str: str = ( + f'{nest_prefix}{indented_tree_str[tree_str_indent:]}' + ) + + if ( + not prefix_op + or + rm_from_first_ln + ): + tree_lns: list[str] = indented_tree_str.splitlines() + first: str = tree_lns[0] + if rm_from_first_ln: + first = first.strip().replace( + rm_from_first_ln, + '', + ) + indented_tree_str: str = '\n'.join(tree_lns[1:]) + + if prefix_op: + indented_tree_str = ( + f'{first}\n' + f'{indented_tree_str}' + ) + + if prefix_op: + return ( + f'{input_op}{op_suffix}' + f'{indented_tree_str}' + ) + else: + return ( + f'{first}{input_op}{op_suffix}' + f'{indented_tree_str}' + ) + + +# ------ modden.repr ------ +# XXX originally taken verbaatim from `modden.repr` +''' +More "multi-line" representation then the stdlib's `pprint` equivs. + +''' +from inspect import ( + FrameInfo, + stack, +) +import pprint +import reprlib +from typing import ( + Callable, +) + + +def mk_repr( + **repr_kws, +) -> Callable[[str], str]: + ''' + Allocate and deliver a `repr.Repr` instance with provided input + settings using the std-lib's `reprlib` mod, + * https://docs.python.org/3/library/reprlib.html + + ------ Ex. ------ + An up to 6-layer-nested `dict` as multi-line: + - https://stackoverflow.com/a/79102479 + - https://docs.python.org/3/library/reprlib.html#reprlib.Repr.maxlevel + + ''' + def_kws: dict[str, int] = dict( + indent=3, # indent used for repr of recursive objects + maxlevel=616, # recursion levels + maxdict=616, # max items shown for `dict` + maxlist=616, # max items shown for `dict` + maxstring=616, # match editor line-len limit + maxtuple=616, # match editor line-len limit + maxother=616, # match editor line-len limit + ) + def_kws |= repr_kws + reprr = reprlib.Repr(**def_kws) + return reprr.repr + + +def ppfmt( + obj: object, + do_print: bool = False, +) -> str: + ''' + The `pprint.pformat()` version of `pprint.pp()`, namely + a default `sort_dicts=False`.. (which i think should be + the normal default in the stdlib). + + ''' + pprepr: Callable = mk_repr() + repr_str: str = pprepr(obj) + + if do_print: + return pprint.pp(repr_str) + + return repr_str + + +pformat = ppfmt + + +def pfmt_frame_info(fi: FrameInfo) -> str: + ''' + Like a std `inspect.FrameInfo.__repr__()` but multi-line.. 
+ + ''' + return ( + 'FrameInfo(\n' + ' frame={!r},\n' + ' filename={!r},\n' + ' lineno={!r},\n' + ' function={!r},\n' + ' code_context={!r},\n' + ' index={!r},\n' + ' positions={!r})' + ).format( + fi.frame, + fi.filename, + fi.lineno, + fi.function, + fi.code_context, + fi.index, + fi.positions + ) + + +def pfmt_callstack(frames: int = 1) -> str: + ''' + Generate a string of nested `inspect.FrameInfo` objects returned + from a `inspect.stack()` call such that only the `.frame` field + for each layer is pprinted. + + ''' + caller_frames: list[FrameInfo] = stack()[1:1+frames] + frames_str: str = '' + for i, frame_info in enumerate(caller_frames): + frames_str += textwrap.indent( + f'{frame_info.frame!r}\n', + prefix=' '*i, + + ) + return frames_str diff --git a/tractor/experimental/_pubsub.py b/tractor/experimental/_pubsub.py index b894ed49..bc5881e1 100644 --- a/tractor/experimental/_pubsub.py +++ b/tractor/experimental/_pubsub.py @@ -45,6 +45,8 @@ __all__ = ['pub'] log = get_logger('messaging') +# TODO! this needs to reworked to use the modern +# `Context`/`MsgStream` APIs!! async def fan_out_to_ctxs( pub_async_gen_func: typing.Callable, # it's an async gen ... gd mypy topics2ctxs: dict[str, list], diff --git a/tractor/hilevel/__init__.py b/tractor/hilevel/__init__.py new file mode 100644 index 00000000..cf2741d8 --- /dev/null +++ b/tractor/hilevel/__init__.py @@ -0,0 +1,26 @@ +# tractor: structured concurrent "actors". +# Copyright 2024-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +''' +High level design patterns, APIs and runtime extensions built on top +of the `tractor` runtime core. + +''' +from ._service import ( + open_service_mngr as open_service_mngr, + get_service_mngr as get_service_mngr, + ServiceMngr as ServiceMngr, +) diff --git a/tractor/hilevel/_service.py b/tractor/hilevel/_service.py new file mode 100644 index 00000000..70dddbdf --- /dev/null +++ b/tractor/hilevel/_service.py @@ -0,0 +1,592 @@ +# tractor: structured concurrent "actors". +# Copyright 2024-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +''' +Daemon subactor as service(s) management and supervision primitives +and API. 
+ +''' +from __future__ import annotations +from contextlib import ( + asynccontextmanager as acm, + # contextmanager as cm, +) +from collections import defaultdict +from dataclasses import ( + dataclass, + field, +) +import functools +import inspect +from typing import ( + Callable, + Any, +) + +import tractor +import trio +from trio import TaskStatus +from tractor import ( + log, + ActorNursery, + current_actor, + ContextCancelled, + Context, + Portal, +) + +log = log.get_logger('tractor') + + +# TODO: implement a `@singleton` deco-API for wrapping the below +# factory's impl for general actor-singleton use? +# +# -[ ] go through the options peeps on SO did? +# * https://stackoverflow.com/questions/6760685/what-is-the-best-way-of-implementing-singleton-in-python +# * including @mikenerone's answer +# |_https://stackoverflow.com/questions/6760685/what-is-the-best-way-of-implementing-singleton-in-python/39186313#39186313 +# +# -[ ] put it in `tractor.lowlevel._globals` ? +# * fits with our oustanding actor-local/global feat req? +# |_ https://github.com/goodboy/tractor/issues/55 +# * how can it relate to the `Actor.lifetime_stack` that was +# silently patched in? +# |_ we could implicitly call both of these in the same +# spot in the runtime using the lifetime stack? +# - `open_singleton_cm().__exit__()` +# -`del_singleton()` +# |_ gives SC fixtue semantics to sync code oriented around +# sub-process lifetime? +# * what about with `trio.RunVar`? +# |_https://trio.readthedocs.io/en/stable/reference-lowlevel.html#trio.lowlevel.RunVar +# - which we'll need for no-GIL cpython (right?) presuming +# multiple `trio.run()` calls in process? +# +# +# @singleton +# async def open_service_mngr( +# **init_kwargs, +# ) -> ServiceMngr: +# ''' +# Note this function body is invoke IFF no existing singleton instance already +# exists in this proc's memory. + +# ''' +# # setup +# yield ServiceMngr(**init_kwargs) +# # teardown + + +# a deletion API for explicit instance de-allocation? +# @open_service_mngr.deleter +# def del_service_mngr() -> None: +# mngr = open_service_mngr._singleton[0] +# open_service_mngr._singleton[0] = None +# del mngr + + + +# TODO: implement a singleton deco-API for wrapping the below +# factory's impl for general actor-singleton use? +# +# @singleton +# async def open_service_mngr( +# **init_kwargs, +# ) -> ServiceMngr: +# ''' +# Note this function body is invoke IFF no existing singleton instance already +# exists in this proc's memory. + +# ''' +# # setup +# yield ServiceMngr(**init_kwargs) +# # teardown + + + +# TODO: singleton factory API instead of a class API +@acm +async def open_service_mngr( + *, + debug_mode: bool = False, + + # NOTE; since default values for keyword-args are effectively + # module-vars/globals as per the note from, + # https://docs.python.org/3/tutorial/controlflow.html#default-argument-values + # + # > "The default value is evaluated only once. This makes + # a difference when the default is a mutable object such as + # a list, dictionary, or instances of most classes" + # + _singleton: list[ServiceMngr|None] = [None], + **init_kwargs, + +) -> ServiceMngr: + ''' + Open an actor-global "service-manager" for supervising a tree + of subactors and/or actor-global tasks. + + The delivered `ServiceMngr` is singleton instance for each + actor-process, that is, allocated on first open and never + de-allocated unless explicitly deleted by al call to + `del_service_mngr()`. 
+ + ''' + # TODO: factor this an allocation into + # a `._mngr.open_service_mngr()` and put in the + # once-n-only-once setup/`.__aenter__()` part! + # -[ ] how to make this only happen on the `mngr == None` case? + # |_ use `.trionics.maybe_open_context()` (for generic + # async-with-style-only-once of the factory impl, though + # what do we do for the allocation case? + # / `.maybe_open_nursery()` (since for this specific case + # it's simpler?) to activate + async with ( + tractor.open_nursery() as an, + trio.open_nursery() as tn, + ): + # impl specific obvi.. + init_kwargs.update({ + 'an': an, + 'tn': tn, + }) + + mngr: ServiceMngr|None + if (mngr := _singleton[0]) is None: + + log.info('Allocating a new service mngr!') + mngr = _singleton[0] = ServiceMngr(**init_kwargs) + + # TODO: put into `.__aenter__()` section of + # eventual `@singleton_acm` API wrapper. + # + # assign globally for future daemon/task creation + mngr.an = an + mngr.tn = tn + + else: + assert (mngr.an and mngr.tn) + log.info( + 'Using extant service mngr!\n\n' + f'{mngr!r}\n' # it has a nice `.__repr__()` of services state + ) + + try: + # NOTE: this is a singleton factory impl specific detail + # which should be supported in the condensed + # `@singleton_acm` API? + mngr.debug_mode = debug_mode + + yield mngr + finally: + # TODO: is this more clever/efficient? + # if 'samplerd' in mngr.service_ctxs: + # await mngr.cancel_service('samplerd') + tn.cancel_scope.cancel() + + + +def get_service_mngr() -> ServiceMngr: + ''' + Try to get the singleton service-mngr for this actor presuming it + has already been allocated using, + + .. code:: python + + async with open_<@singleton_acm(func)>() as mngr` + ... this block kept open ... + + If not yet allocated raise a `ServiceError`. + + ''' + # https://stackoverflow.com/a/12627202 + # https://docs.python.org/3/library/inspect.html#inspect.Signature + maybe_mngr: ServiceMngr|None = inspect.signature( + open_service_mngr + ).parameters['_singleton'].default[0] + + if maybe_mngr is None: + raise RuntimeError( + 'Someone must allocate a `ServiceMngr` using\n\n' + '`async with open_service_mngr()` beforehand!!\n' + ) + + return maybe_mngr + + +async def _open_and_supervise_service_ctx( + serman: ServiceMngr, + name: str, + ctx_fn: Callable, # TODO, type for `@tractor.context` requirement + portal: Portal, + + allow_overruns: bool = False, + task_status: TaskStatus[ + tuple[ + trio.CancelScope, + Context, + trio.Event, + Any, + ] + ] = trio.TASK_STATUS_IGNORED, + **ctx_kwargs, + +) -> Any: + ''' + Open a remote IPC-context defined by `ctx_fn` in the + (service) actor accessed via `portal` and supervise the + (local) parent task to termination at which point the remote + actor runtime is cancelled alongside it. + + The main application is for allocating long-running + "sub-services" in a main daemon and explicitly controlling + their lifetimes from an actor-global singleton. + + ''' + # TODO: use the ctx._scope directly here instead? + # -[ ] actually what semantics do we expect for this + # usage!? + with trio.CancelScope() as cs: + try: + async with portal.open_context( + ctx_fn, + allow_overruns=allow_overruns, + **ctx_kwargs, + + ) as (ctx, started): + + # unblock once the remote context has started + complete = trio.Event() + task_status.started(( + cs, + ctx, + complete, + started, + )) + log.info( + f'`pikerd` service {name} started with value {started}' + ) + # wait on any context's return value + # and any final portal result from the + # sub-actor. 
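# ------ usage sketch: the mutable-default singleton slot ------
# `open_service_mngr()` stashes its singleton in the mutable
# `_singleton=[None]` default (evaluated once per process, as the
# tutorial link above explains) and `get_service_mngr()` reads it back
# via `inspect.signature()`; a tiny stdlib-only sketch of that exact
# trick with a throwaway `open_thing()`/`get_thing()` pair.
import inspect
from contextlib import contextmanager


@contextmanager
def open_thing(
    # intentionally-mutable default: created once at `def` time so it
    # acts as a process-global slot.
    _singleton: list = [None],
):
    if _singleton[0] is None:
        _singleton[0] = object()
    yield _singleton[0]


def get_thing() -> object:
    slot: list = inspect.signature(
        open_thing
    ).parameters['_singleton'].default
    if slot[0] is None:
        raise RuntimeError('call `open_thing()` first!')
    return slot[0]


with open_thing() as thing:
    assert get_thing() is thing
# ------ end sketch ------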
+ ctx_res: Any = await ctx.wait_for_result() + + # NOTE: blocks indefinitely until cancelled + # either by error from the target context + # function or by being cancelled here by the + # surrounding cancel scope. + return ( + await portal.wait_for_result(), + ctx_res, + ) + + except ContextCancelled as ctxe: + canceller: tuple[str, str] = ctxe.canceller + our_uid: tuple[str, str] = current_actor().uid + if ( + canceller != portal.chan.uid + and + canceller != our_uid + ): + log.cancel( + f'Actor-service `{name}` was remotely cancelled by a peer?\n' + + # TODO: this would be a good spot to use + # a respawn feature Bo + f'-> Keeping `pikerd` service manager alive despite this inter-peer cancel\n\n' + + f'cancellee: {portal.chan.uid}\n' + f'canceller: {canceller}\n' + ) + else: + raise + + finally: + # NOTE: the ctx MUST be cancelled first if we + # don't want the above `ctx.wait_for_result()` to + # raise a self-ctxc. WHY, well since from the ctx's + # perspective the cancel request will have + # arrived out-out-of-band at the `Actor.cancel()` + # level, thus `Context.cancel_called == False`, + # meaning `ctx._is_self_cancelled() == False`. + # with trio.CancelScope(shield=True): + # await ctx.cancel() + await portal.cancel_actor() # terminate (remote) sub-actor + complete.set() # signal caller this task is done + serman.service_ctxs.pop(name) # remove mngr entry + + +# TODO: we need remote wrapping and a general soln: +# - factor this into a ``tractor.highlevel`` extension # pack for the +# library. +# - wrap a "remote api" wherein you can get a method proxy +# to the pikerd actor for starting services remotely! +# - prolly rename this to ActorServicesNursery since it spawns +# new actors and supervises them to completion? +@dataclass +class ServiceMngr: + ''' + A multi-subactor-as-service manager. + + Spawn, supervise and monitor service/daemon subactors in a SC + process tree. + + ''' + an: ActorNursery + tn: trio.Nursery + debug_mode: bool = False # tractor sub-actor debug mode flag + + service_tasks: dict[ + str, + tuple[ + trio.CancelScope, + trio.Event, + ] + ] = field(default_factory=dict) + + service_ctxs: dict[ + str, + tuple[ + trio.CancelScope, + Context, + Portal, + trio.Event, + ] + ] = field(default_factory=dict) + + # internal per-service task mutexs + _locks = defaultdict(trio.Lock) + + # TODO, unify this interface with our `TaskManager` PR! + # + # + async def start_service_task( + self, + name: str, + # TODO: typevar for the return type of the target and then + # use it below for `ctx_res`? + fn: Callable, + + allow_overruns: bool = False, + **ctx_kwargs, + + ) -> tuple[ + trio.CancelScope, + Any, + trio.Event, + ]: + async def _task_manager_start( + task_status: TaskStatus[ + tuple[ + trio.CancelScope, + trio.Event, + ] + ] = trio.TASK_STATUS_IGNORED, + ) -> Any: + + task_cs = trio.CancelScope() + task_complete = trio.Event() + + with task_cs as cs: + task_status.started(( + cs, + task_complete, + )) + try: + await fn() + except trio.Cancelled as taskc: + log.cancel( + f'Service task for `{name}` was cancelled!\n' + # TODO: this would be a good spot to use + # a respawn feature Bo + ) + raise taskc + finally: + task_complete.set() + ( + cs, + complete, + ) = await self.tn.start(_task_manager_start) + + # store the cancel scope and portal for later cancellation or + # retstart if needed. 
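# ------ usage sketch: `(CancelScope, Event)` service-task bookkeeping ------
# A condensed, `tractor`-free sketch of the supervision hand-off that
# `.start_service_task()` above and `.cancel_service_task()` below
# implement for purely actor-local background tasks: start via
# `nursery.start()`, get back a cancel scope plus a completion event,
# later cancel and wait. Names are illustrative only.
import trio


async def _run_service(
    fn,
    task_status: trio.TaskStatus = trio.TASK_STATUS_IGNORED,
) -> None:
    done = trio.Event()
    with trio.CancelScope() as cs:
        task_status.started((cs, done))
        try:
            await fn()
        finally:
            done.set()  # always signal termination to any canceller


async def _demo_service_task() -> None:
    async with trio.open_nursery() as tn:
        cs, done = await tn.start(_run_service, trio.sleep_forever)

        # later: explicit cancellation + sync on completion, the same
        # dance as `cancel_service_task()`.
        cs.cancel()
        await done.wait()


if __name__ == '__main__':
    trio.run(_demo_service_task)
# ------ end sketch ------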
+ self.service_tasks[name] = ( + cs, + complete, + ) + return ( + cs, + complete, + ) + + async def cancel_service_task( + self, + name: str, + + ) -> Any: + log.info(f'Cancelling `pikerd` service {name}') + cs, complete = self.service_tasks[name] + + cs.cancel() + await complete.wait() + # TODO, if we use the `TaskMngr` from #346 + # we can also get the return value from the task! + + if name in self.service_tasks: + # TODO: custom err? + # raise ServiceError( + raise RuntimeError( + f'Service task {name!r} not terminated!?\n' + ) + + async def start_service_ctx( + self, + name: str, + portal: Portal, + # TODO: typevar for the return type of the target and then + # use it below for `ctx_res`? + ctx_fn: Callable, + **ctx_kwargs, + + ) -> tuple[ + trio.CancelScope, + Context, + Any, + ]: + ''' + Start a remote IPC-context defined by `ctx_fn` in a background + task and immediately return supervision primitives to manage it: + + - a `cs: CancelScope` for the newly allocated bg task + - the `ipc_ctx: Context` to manage the remotely scheduled + `trio.Task`. + - the `started: Any` value returned by the remote endpoint + task's `Context.started()` call. + + The bg task supervises the ctx such that when it terminates the supporting + actor runtime is also cancelled, see `_open_and_supervise_service_ctx()` + for details. + + ''' + cs, ipc_ctx, complete, started = await self.tn.start( + functools.partial( + _open_and_supervise_service_ctx, + serman=self, + name=name, + ctx_fn=ctx_fn, + portal=portal, + **ctx_kwargs, + ) + ) + + # store the cancel scope and portal for later cancellation or + # retstart if needed. + self.service_ctxs[name] = (cs, ipc_ctx, portal, complete) + return ( + cs, + ipc_ctx, + started, + ) + + async def start_service( + self, + daemon_name: str, + ctx_ep: Callable, # kwargs must `partial`-ed in! + # ^TODO, type for `@tractor.context` deco-ed funcs! + + debug_mode: bool = False, + **start_actor_kwargs, + + ) -> Context: + ''' + Start new subactor and schedule a supervising "service task" + in it which explicitly defines the sub's lifetime. + + "Service daemon subactors" are cancelled (and thus + terminated) using the paired `.cancel_service()`. + + Effectively this API can be used to manage "service daemons" + spawned under a single parent actor with supervision + semantics equivalent to a one-cancels-one style actor-nursery + or "(subactor) task manager" where each subprocess's (and + thus its embedded actor runtime) lifetime is synced to that + of the remotely spawned task defined by `ctx_ep`. + + The funcionality can be likened to a "daemonized" version of + `.hilevel.worker.run_in_actor()` but with supervision + controls offered by `tractor.Context` where the main/root + remotely scheduled `trio.Task` invoking `ctx_ep` determines + the underlying subactor's lifetime. 
+ + ''' + entry: tuple|None = self.service_ctxs.get(daemon_name) + if entry: + (cs, sub_ctx, portal, complete) = entry + return sub_ctx + + if daemon_name not in self.service_ctxs: + portal: Portal = await self.an.start_actor( + daemon_name, + debug_mode=( # maybe set globally during allocate + debug_mode + or + self.debug_mode + ), + **start_actor_kwargs, + ) + ctx_kwargs: dict[str, Any] = {} + if isinstance(ctx_ep, functools.partial): + ctx_kwargs: dict[str, Any] = ctx_ep.keywords + ctx_ep: Callable = ctx_ep.func + + ( + cs, + sub_ctx, + started, + ) = await self.start_service_ctx( + name=daemon_name, + portal=portal, + ctx_fn=ctx_ep, + **ctx_kwargs, + ) + + return sub_ctx + + async def cancel_service( + self, + name: str, + + ) -> Any: + ''' + Cancel the service task and actor for the given ``name``. + + ''' + log.info(f'Cancelling `pikerd` service {name}') + cs, sub_ctx, portal, complete = self.service_ctxs[name] + + # cs.cancel() + await sub_ctx.cancel() + await complete.wait() + + if name in self.service_ctxs: + # TODO: custom err? + # raise ServiceError( + raise RuntimeError( + f'Service actor for {name} not terminated and/or unknown?' + ) + + # assert name not in self.service_ctxs, \ + # f'Serice task for {name} not terminated?' diff --git a/tractor/ipc/__init__.py b/tractor/ipc/__init__.py new file mode 100644 index 00000000..2c6c3b5d --- /dev/null +++ b/tractor/ipc/__init__.py @@ -0,0 +1,24 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +''' +A modular IPC layer supporting the power of cross-process SC! + +''' +from ._chan import ( + _connect_chan as _connect_chan, + Channel as Channel +) diff --git a/tractor/ipc/_chan.py b/tractor/ipc/_chan.py new file mode 100644 index 00000000..dcb0d6ad --- /dev/null +++ b/tractor/ipc/_chan.py @@ -0,0 +1,503 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
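# ------ usage sketch: driving the service mngr ------
# A hypothetical end-to-end usage of the manager defined above; the
# `_sleep_forever()` endpoint and the 'sleeperd' name are made up for
# illustration and only the `open_service_mngr()`/`.start_service()`/
# `.cancel_service()` APIs from this file are assumed (plus an
# implicitly bootstrapped `tractor` runtime).
import functools

import tractor
import trio

from tractor.hilevel import open_service_mngr


@tractor.context
async def _sleep_forever(
    ctx: tractor.Context,
    msg: str,
) -> None:
    # hypothetical long-running service endpoint.
    await ctx.started(msg)
    await trio.sleep_forever()


async def main() -> None:
    async with open_service_mngr() as mngr:
        await mngr.start_service(
            daemon_name='sleeperd',
            ctx_ep=functools.partial(
                _sleep_forever,
                msg='sleeperd up!',
            ),
        )
        # ..interact with the running service..
        await mngr.cancel_service('sleeperd')


if __name__ == '__main__':
    trio.run(main)
# ------ end sketch ------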
+ +""" +Inter-process comms abstractions + +""" +from __future__ import annotations +from collections.abc import AsyncGenerator +from contextlib import ( + asynccontextmanager as acm, + contextmanager as cm, +) +import platform +from pprint import pformat +import typing +from typing import ( + Any, + TYPE_CHECKING, +) +import warnings + +import trio + +from ._types import ( + transport_from_addr, + transport_from_stream, +) +from tractor._addr import ( + is_wrapped_addr, + wrap_address, + Address, + UnwrappedAddress, +) +from tractor.log import get_logger +from tractor._exceptions import ( + MsgTypeError, + pack_from_raise, + TransportClosed, +) +from tractor.msg import ( + Aid, + MsgCodec, +) + +if TYPE_CHECKING: + from ._transport import MsgTransport + + +log = get_logger(__name__) + +_is_windows = platform.system() == 'Windows' + + +class Channel: + ''' + An inter-process channel for communication between (remote) actors. + + Wraps a ``MsgStream``: transport + encoding IPC connection. + + Currently we only support ``trio.SocketStream`` for transport + (aka TCP) and the ``msgpack`` interchange format via the ``msgspec`` + codec libary. + + ''' + def __init__( + + self, + transport: MsgTransport|None = None, + # TODO: optional reconnection support? + # auto_reconnect: bool = False, + # on_reconnect: typing.Callable[..., typing.Awaitable] = None, + + ) -> None: + + # self._recon_seq = on_reconnect + # self._autorecon = auto_reconnect + + # Either created in ``.connect()`` or passed in by + # user in ``.from_stream()``. + self._transport: MsgTransport|None = transport + + # set after handshake - always info from peer end + self.aid: Aid|None = None + + self._aiter_msgs = self._iter_msgs() + self._exc: Exception|None = None + # ^XXX! ONLY set if a remote actor sends an `Error`-msg + self._closed: bool = False + + # flag set by `Portal.cancel_actor()` indicating remote + # (possibly peer) cancellation of the far end actor runtime. + self._cancel_called: bool = False + + @property + def closed(self) -> bool: + ''' + Was `.aclose()` successfully called? + + ''' + return self._closed + + @property + def cancel_called(self) -> bool: + ''' + Set when `Portal.cancel_actor()` is called on a portal which + wraps this IPC channel. + + ''' + return self._cancel_called + + @property + def uid(self) -> tuple[str, str]: + ''' + Peer actor's unique id. + + ''' + msg: str = ( + f'`{type(self).__name__}.uid` is now deprecated.\n' + 'Use the new `.aid: tractor.msg.Aid` (struct) instead ' + 'which also provides additional named (optional) fields ' + 'beyond just the `.name` and `.uuid`.' 
+ ) + warnings.warn( + msg, + DeprecationWarning, + stacklevel=2, + ) + peer_aid: Aid = self.aid + return ( + peer_aid.name, + peer_aid.uuid, + ) + + @property + def stream(self) -> trio.abc.Stream | None: + return self._transport.stream if self._transport else None + + @property + def msgstream(self) -> MsgTransport: + log.info( + '`Channel.msgstream` is an old name, use `._transport`' + ) + return self._transport + + @property + def transport(self) -> MsgTransport: + return self._transport + + @classmethod + def from_stream( + cls, + stream: trio.abc.Stream, + ) -> Channel: + transport_cls = transport_from_stream(stream) + return Channel( + transport=transport_cls(stream) + ) + + @classmethod + async def from_addr( + cls, + addr: UnwrappedAddress, + **kwargs + ) -> Channel: + + if not is_wrapped_addr(addr): + addr: Address = wrap_address(addr) + + transport_cls = transport_from_addr(addr) + transport = await transport_cls.connect_to( + addr, + **kwargs, + ) + # XXX, for UDS *no!* since we recv the peer-pid and build out + # a new addr.. + # assert transport.raddr == addr + chan = Channel(transport=transport) + + # ?TODO, compact this into adapter level-methods? + # -[ ] would avoid extra repr-calcs if level not active? + # |_ how would the `calc_if_level` look though? func? + if log.at_least_level('runtime'): + from tractor.devx import ( + pformat as _pformat, + ) + chan_repr: str = _pformat.nest_from_op( + input_op='[>', + text=chan.pformat(), + nest_indent=1, + ) + log.runtime( + f'Connected channel IPC transport\n' + f'{chan_repr}' + ) + return chan + + @cm + def apply_codec( + self, + codec: MsgCodec, + ) -> None: + ''' + Temporarily override the underlying IPC msg codec for + dynamic enforcement of messaging schema. + + ''' + orig: MsgCodec = self._transport.codec + try: + self._transport.codec = codec + yield + finally: + self._transport.codec = orig + + # TODO: do a .src/.dst: str for maddrs? + def pformat( + self, + privates: bool = False, + ) -> str: + if not self._transport: + return '' + + tpt: MsgTransport = self._transport + tpt_name: str = type(tpt).__name__ + tpt_status: str = ( + 'connected' if self.connected() + else 'closed' + ) + repr_str: str = ( + f'\n' + ) + ( + f' |_msgstream: {tpt_name}\n' + f' maddr: {tpt.maddr!r}\n' + f' proto: {tpt.laddr.proto_key!r}\n' + f' layer: {tpt.layer_key!r}\n' + f' codec: {tpt.codec_key!r}\n' + f' .laddr={tpt.laddr}\n' + f' .raddr={tpt.raddr}\n' + ) + ( + f' ._transport.stream={tpt.stream}\n' + f' ._transport.drained={tpt.drained}\n' + if privates else '' + ) + ( + f' _send_lock={tpt._send_lock.statistics()}\n' + if privates else '' + ) + ( + ')>\n' + ) + return repr_str + + # NOTE: making this return a value that can be passed to + # `eval()` is entirely **optional** FYI! + # https://docs.python.org/3/library/functions.html#repr + # https://docs.python.org/3/reference/datamodel.html#object.__repr__ + # + # Currently we target **readability** from a (console) + # logging perspective over `eval()`-ability since we do NOT + # target serializing non-struct instances! 
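# ------ usage sketch: temporary attribute override ------
# `Channel.apply_codec()` above is just a temporary attribute swap
# wrapped in a `@contextmanager`; the general, stdlib-only shape (the
# `override_attr()` helper and `_Transport` stub are illustrative):
from contextlib import contextmanager


@contextmanager
def override_attr(obj: object, name: str, value: object):
    # swap `obj.<name>` for the body's duration, always restoring the
    # original even if the body raises.
    orig = getattr(obj, name)
    setattr(obj, name, value)
    try:
        yield obj
    finally:
        setattr(obj, name, orig)


class _Transport:
    codec = 'msgpack'


tpt = _Transport()
with override_attr(tpt, 'codec', 'json'):
    assert tpt.codec == 'json'
assert tpt.codec == 'msgpack'
# ------ end sketch ------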
+ # def __repr__(self) -> str: + __str__ = pformat + __repr__ = pformat + + @property + def laddr(self) -> Address|None: + return self._transport.laddr if self._transport else None + + @property + def raddr(self) -> Address|None: + return self._transport.raddr if self._transport else None + + @property + def maddr(self) -> str: + return self._transport.maddr if self._transport else '' + + # TODO: something like, + # `pdbp.hideframe_on(errors=[MsgTypeError])` + # instead of the `try/except` hack we have rn.. + # seems like a pretty useful thing to have in general + # along with being able to filter certain stack frame(s / sets) + # possibly based on the current log-level? + async def send( + self, + payload: Any, + + hide_tb: bool = False, + + ) -> None: + ''' + Send a coded msg-blob over the transport. + + ''' + __tracebackhide__: bool = hide_tb + try: + log.transport( + '=> send IPC msg:\n\n' + f'{pformat(payload)}\n' + ) + # assert self._transport # but why typing? + await self._transport.send( + payload, + hide_tb=hide_tb, + ) + except ( + BaseException, + MsgTypeError, + TransportClosed, + )as _err: + err = _err # bind for introspection + match err: + case MsgTypeError(): + try: + assert err.cid + except KeyError: + raise err + case TransportClosed(): + log.transport( + f'Transport stream closed due to\n' + f'{err.repr_src_exc()}\n' + ) + + case _: + # never suppress non-tpt sources + __tracebackhide__: bool = False + raise + + async def recv(self) -> Any: + assert self._transport + return await self._transport.recv() + + # TODO: auto-reconnect features like 0mq/nanomsg? + # -[ ] implement it manually with nods to SC prot + # possibly on multiple transport backends? + # -> seems like that might be re-inventing scalability + # prots tho no? + # try: + # return await self._transport.recv() + # except trio.BrokenResourceError: + # if self._autorecon: + # await self._reconnect() + # return await self.recv() + # raise + + async def aclose(self) -> None: + + log.transport( + f'Closing channel to {self.aid} ' + f'{self.laddr} -> {self.raddr}' + ) + assert self._transport + await self._transport.stream.aclose() + self._closed = True + + async def __aenter__(self): + await self.connect() + return self + + async def __aexit__(self, *args): + await self.aclose(*args) + + def __aiter__(self): + return self._aiter_msgs + + # ?TODO? run any reconnection sequence? + # -[ ] prolly should be impl-ed as deco-API? + # + # async def _reconnect(self) -> None: + # """Handle connection failures by polling until a reconnect can be + # established. + # """ + # down = False + # while True: + # try: + # with trio.move_on_after(3) as cancel_scope: + # await self.connect() + # cancelled = cancel_scope.cancelled_caught + # if cancelled: + # log.transport( + # "Reconnect timed out after 3 seconds, retrying...") + # continue + # else: + # log.transport("Stream connection re-established!") + + # # on_recon = self._recon_seq + # # if on_recon: + # # await on_recon(self) + + # break + # except (OSError, ConnectionRefusedError): + # if not down: + # down = True + # log.transport( + # f"Connection to {self.raddr} went down, waiting" + # " for re-establishment") + # await trio.sleep(1) + + async def _iter_msgs( + self + ) -> AsyncGenerator[Any, None]: + ''' + Yield `MsgType` IPC msgs decoded and deliverd from + an underlying `MsgTransport` protocol. 
+ + This is a streaming routine alo implemented as an async-gen + func (same a `MsgTransport._iter_pkts()`) gets allocated by + a `.__call__()` inside `.__init__()` where it is assigned to + the `._aiter_msgs` attr. + + ''' + assert self._transport + while True: + try: + async for msg in self._transport: + match msg: + # NOTE: if transport/interchange delivers + # a type error, we pack it with the far + # end peer `Actor.uid` and relay the + # `Error`-msg upward to the `._rpc` stack + # for normal RAE handling. + case MsgTypeError(): + yield pack_from_raise( + local_err=msg, + cid=msg.cid, + + # XXX we pack it here bc lower + # layers have no notion of an + # actor-id ;) + src_uid=self.uid, + ) + case _: + yield msg + + except trio.BrokenResourceError: + + # if not self._autorecon: + raise + + await self.aclose() + + # if self._autorecon: # attempt reconnect + # await self._reconnect() + # continue + + def connected(self) -> bool: + return self._transport.connected() if self._transport else False + + async def _do_handshake( + self, + aid: Aid, + + ) -> Aid: + ''' + Exchange `(name, UUIDs)` identifiers as the first + communication step with any (peer) remote `Actor`. + + These are essentially the "mailbox addresses" found in + "actor model" parlance. + + ''' + await self.send(aid) + peer_aid: Aid = await self.recv() + log.runtime( + f'Received hanshake with peer\n' + f'<= {peer_aid.reprol(sin_uuid=False)}\n' + ) + # NOTE, we always are referencing the remote peer! + self.aid = peer_aid + return peer_aid + + +@acm +async def _connect_chan( + addr: UnwrappedAddress +) -> typing.AsyncGenerator[Channel, None]: + ''' + Create and connect a channel with disconnect on context manager + teardown. + + ''' + chan = await Channel.from_addr(addr) + yield chan + with trio.CancelScope(shield=True): + await chan.aclose() diff --git a/tractor/ipc/_fd_share.py b/tractor/ipc/_fd_share.py new file mode 100644 index 00000000..e51069ba --- /dev/null +++ b/tractor/ipc/_fd_share.py @@ -0,0 +1,163 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +''' +File-descriptor-sharing on `linux` by "wilhelm_of_bohemia". 
+ +''' +from __future__ import annotations +import os +import array +import socket +import tempfile +from pathlib import Path +from contextlib import ExitStack + +import trio +import tractor +from tractor.ipc import RBToken + + +actor_name = 'ringd' + + +_rings: dict[str, dict] = {} + + +async def _attach_to_ring( + ring_name: str +) -> tuple[int, int, int]: + actor = tractor.current_actor() + + fd_amount = 3 + sock_path = ( + Path(tempfile.gettempdir()) + / + f'{os.getpid()}-pass-ring-fds-{ring_name}-to-{actor.name}.sock' + ) + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.bind(sock_path) + sock.listen(1) + + async with ( + tractor.find_actor(actor_name) as ringd, + ringd.open_context( + _pass_fds, + name=ring_name, + sock_path=sock_path + ) as (ctx, _sent) + ): + # prepare array to receive FD + fds = array.array("i", [0] * fd_amount) + + conn, _ = sock.accept() + + # receive FD + msg, ancdata, flags, addr = conn.recvmsg( + 1024, + socket.CMSG_LEN(fds.itemsize * fd_amount) + ) + + for ( + cmsg_level, + cmsg_type, + cmsg_data, + ) in ancdata: + if ( + cmsg_level == socket.SOL_SOCKET + and + cmsg_type == socket.SCM_RIGHTS + ): + fds.frombytes(cmsg_data[:fds.itemsize * fd_amount]) + break + else: + raise RuntimeError("Receiver: No FDs received") + + conn.close() + sock.close() + sock_path.unlink() + + return RBToken.from_msg( + await ctx.wait_for_result() + ) + + +@tractor.context +async def _pass_fds( + ctx: tractor.Context, + name: str, + sock_path: str +) -> RBToken: + global _rings + token = _rings[name] + client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + client.connect(sock_path) + await ctx.started() + fds = array.array('i', token.fds) + client.sendmsg([b'FDs'], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + client.close() + return token + + +@tractor.context +async def _open_ringbuf( + ctx: tractor.Context, + name: str, + buf_size: int +) -> RBToken: + global _rings + is_owner = False + if name not in _rings: + stack = ExitStack() + token = stack.enter_context( + tractor.open_ringbuf( + name, + buf_size=buf_size + ) + ) + _rings[name] = { + 'token': token, + 'stack': stack, + } + is_owner = True + + ring = _rings[name] + await ctx.started() + + try: + await trio.sleep_forever() + + except tractor.ContextCancelled: + ... + + finally: + if is_owner: + ring['stack'].close() + + +async def open_ringbuf( + name: str, + buf_size: int +) -> RBToken: + async with ( + tractor.find_actor(actor_name) as ringd, + ringd.open_context( + _open_ringbuf, + name=name, + buf_size=buf_size + ) as (rd_ctx, _) + ): + yield await _attach_to_ring(name) + await rd_ctx.cancel() diff --git a/tractor/ipc/_linux.py b/tractor/ipc/_linux.py new file mode 100644 index 00000000..88d80d1c --- /dev/null +++ b/tractor/ipc/_linux.py @@ -0,0 +1,153 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
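+# NOTE (docs sketch): the `eventfd(2)` helpers defined in this module
+# are meant to be composed roughly like the following, purely
+# illustrative, snippet (example values only):
+#
+#   fd: int = open_eventfd(initval=0, flags=0)
+#   write_eventfd(fd, 1)            # bump the 64-bit counter by 1
+#   assert read_eventfd(fd) == 1    # read (and reset) the counter
+#   close_eventfd(fd)
+#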
+''' +Linux specifics, for now we are only exposing EventFD + +''' +import os +import errno + +import cffi +import trio + +ffi = cffi.FFI() + +# Declare the C functions and types we plan to use. +# - eventfd: for creating the event file descriptor +# - write: for writing to the file descriptor +# - read: for reading from the file descriptor +# - close: for closing the file descriptor +ffi.cdef( + ''' + int eventfd(unsigned int initval, int flags); + + ssize_t write(int fd, const void *buf, size_t count); + ssize_t read(int fd, void *buf, size_t count); + + int close(int fd); + ''' +) + + +# Open the default dynamic library (essentially 'libc' in most cases) +C = ffi.dlopen(None) + + +# Constants from , if needed. +EFD_SEMAPHORE = 1 +EFD_CLOEXEC = 0o2000000 +EFD_NONBLOCK = 0o4000 + + +def open_eventfd(initval: int = 0, flags: int = 0) -> int: + ''' + Open an eventfd with the given initial value and flags. + Returns the file descriptor on success, otherwise raises OSError. + + ''' + fd = C.eventfd(initval, flags) + if fd < 0: + raise OSError(errno.errorcode[ffi.errno], 'eventfd failed') + return fd + + +def write_eventfd(fd: int, value: int) -> int: + ''' + Write a 64-bit integer (uint64_t) to the eventfd's counter. + + ''' + # Create a uint64_t* in C, store `value` + data_ptr = ffi.new('uint64_t *', value) + + # Call write(fd, data_ptr, 8) + # We expect to write exactly 8 bytes (sizeof(uint64_t)) + ret = C.write(fd, data_ptr, 8) + if ret < 0: + raise OSError(errno.errorcode[ffi.errno], 'write to eventfd failed') + return ret + + +def read_eventfd(fd: int) -> int: + ''' + Read a 64-bit integer (uint64_t) from the eventfd, returning the value. + Reading resets the counter to 0 (unless using EFD_SEMAPHORE). + + ''' + # Allocate an 8-byte buffer in C for reading + buf = ffi.new('char[]', 8) + + ret = C.read(fd, buf, 8) + if ret < 0: + raise OSError(errno.errorcode[ffi.errno], 'read from eventfd failed') + # Convert the 8 bytes we read into a Python integer + data_bytes = ffi.unpack(buf, 8) # returns a Python bytes object of length 8 + value = int.from_bytes(data_bytes, byteorder='little', signed=False) + return value + + +def close_eventfd(fd: int) -> int: + ''' + Close the eventfd. + + ''' + ret = C.close(fd) + if ret < 0: + raise OSError(errno.errorcode[ffi.errno], 'close failed') + + +class EventFD: + ''' + Use a previously opened eventfd(2), meant to be used in + sub-actors after root actor opens the eventfds then passes + them through pass_fds + + ''' + + def __init__( + self, + fd: int, + omode: str + ): + self._fd: int = fd + self._omode: str = omode + self._fobj = None + + @property + def fd(self) -> int | None: + return self._fd + + def write(self, value: int) -> int: + return write_eventfd(self._fd, value) + + async def read(self) -> int: + return await trio.to_thread.run_sync( + read_eventfd, self._fd, + abandon_on_cancel=True + ) + + def open(self): + self._fobj = os.fdopen(self._fd, self._omode) + + def close(self): + if self._fobj: + self._fobj.close() + + def __enter__(self): + self.open() + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() diff --git a/tractor/ipc/_mp_bs.py b/tractor/ipc/_mp_bs.py new file mode 100644 index 00000000..462291c6 --- /dev/null +++ b/tractor/ipc/_mp_bs.py @@ -0,0 +1,75 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. 
+ +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +''' +Utils to tame mp non-SC madeness + +''' +import platform + + +def disable_mantracker(): + ''' + Disable all `multiprocessing` "resource tracking" machinery since + it's an absolute multi-threaded mess of non-SC madness. + + ''' + from multiprocessing.shared_memory import SharedMemory + + + # 3.13+ only.. can pass `track=False` to disable + # all the resource tracker bs. + # https://docs.python.org/3/library/multiprocessing.shared_memory.html + if (_py_313 := ( + platform.python_version_tuple()[:-1] + >= + ('3', '13') + ) + ): + from functools import partial + return partial( + SharedMemory, + track=False, + ) + + # !TODO, once we drop 3.12- we can obvi remove all this! + else: + from multiprocessing import ( + resource_tracker as mantracker, + ) + + # Tell the "resource tracker" thing to fuck off. + class ManTracker(mantracker.ResourceTracker): + def register(self, name, rtype): + pass + + def unregister(self, name, rtype): + pass + + def ensure_running(self): + pass + + # "know your land and know your prey" + # https://www.dailymotion.com/video/x6ozzco + mantracker._resource_tracker = ManTracker() + mantracker.register = mantracker._resource_tracker.register + mantracker.ensure_running = mantracker._resource_tracker.ensure_running + mantracker.unregister = mantracker._resource_tracker.unregister + mantracker.getfd = mantracker._resource_tracker.getfd + + # use std type verbatim + shmT = SharedMemory + + return shmT diff --git a/tractor/ipc/_ringbuf.py b/tractor/ipc/_ringbuf.py new file mode 100644 index 00000000..6337eea1 --- /dev/null +++ b/tractor/ipc/_ringbuf.py @@ -0,0 +1,253 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
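+# NOTE (docs sketch): an illustrative, in-process wiring of the types
+# defined below; in real usage each side typically runs in a separate
+# actor/process with the token's fds handed over via `pass_fds`:
+#
+#   with open_ringbuf('demo-ring', buf_size=4096) as token:
+#       async with (
+#           RingBuffSender(token) as tx,
+#           RingBuffReceiver(token) as rx,
+#       ):
+#           await tx.send_all(b'ding')
+#           msg: memoryview = await rx.receive_some()
+#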
+''' +IPC Reliable RingBuffer implementation + +''' +from __future__ import annotations +from contextlib import contextmanager as cm +from multiprocessing.shared_memory import SharedMemory + +import trio +from msgspec import ( + Struct, + to_builtins +) + +from ._linux import ( + EFD_NONBLOCK, + open_eventfd, + EventFD +) +from ._mp_bs import disable_mantracker + + +disable_mantracker() + + +class RBToken(Struct, frozen=True): + ''' + RingBuffer token contains necesary info to open the two + eventfds and the shared memory + + ''' + shm_name: str + write_eventfd: int + wrap_eventfd: int + buf_size: int + + def as_msg(self): + return to_builtins(self) + + @classmethod + def from_msg(cls, msg: dict) -> RBToken: + if isinstance(msg, RBToken): + return msg + + return RBToken(**msg) + + +@cm +def open_ringbuf( + shm_name: str, + buf_size: int = 10 * 1024, + write_efd_flags: int = 0, + wrap_efd_flags: int = 0 +) -> RBToken: + shm = SharedMemory( + name=shm_name, + size=buf_size, + create=True + ) + try: + token = RBToken( + shm_name=shm_name, + write_eventfd=open_eventfd(flags=write_efd_flags), + wrap_eventfd=open_eventfd(flags=wrap_efd_flags), + buf_size=buf_size + ) + yield token + + finally: + shm.unlink() + + +class RingBuffSender(trio.abc.SendStream): + ''' + IPC Reliable Ring Buffer sender side implementation + + `eventfd(2)` is used for wrap around sync, and also to signal + writes to the reader. + + ''' + def __init__( + self, + token: RBToken, + start_ptr: int = 0, + ): + token = RBToken.from_msg(token) + self._shm = SharedMemory( + name=token.shm_name, + size=token.buf_size, + create=False + ) + self._write_event = EventFD(token.write_eventfd, 'w') + self._wrap_event = EventFD(token.wrap_eventfd, 'r') + self._ptr = start_ptr + + @property + def key(self) -> str: + return self._shm.name + + @property + def size(self) -> int: + return self._shm.size + + @property + def ptr(self) -> int: + return self._ptr + + @property + def write_fd(self) -> int: + return self._write_event.fd + + @property + def wrap_fd(self) -> int: + return self._wrap_event.fd + + async def send_all(self, data: bytes | bytearray | memoryview): + # while data is larger than the remaining buf + target_ptr = self.ptr + len(data) + while target_ptr > self.size: + # write all bytes that fit + remaining = self.size - self.ptr + self._shm.buf[self.ptr:] = data[:remaining] + # signal write and wait for reader wrap around + self._write_event.write(remaining) + await self._wrap_event.read() + + # wrap around and trim already written bytes + self._ptr = 0 + data = data[remaining:] + target_ptr = self._ptr + len(data) + + # remaining data fits on buffer + self._shm.buf[self.ptr:target_ptr] = data + self._write_event.write(len(data)) + self._ptr = target_ptr + + async def wait_send_all_might_not_block(self): + raise NotImplementedError + + async def aclose(self): + self._write_event.close() + self._wrap_event.close() + self._shm.close() + + async def __aenter__(self): + self._write_event.open() + self._wrap_event.open() + return self + + +class RingBuffReceiver(trio.abc.ReceiveStream): + ''' + IPC Reliable Ring Buffer receiver side implementation + + `eventfd(2)` is used for wrap around sync, and also to signal + writes to the reader. 
+ + ''' + def __init__( + self, + token: RBToken, + start_ptr: int = 0, + flags: int = 0 + ): + token = RBToken.from_msg(token) + self._shm = SharedMemory( + name=token.shm_name, + size=token.buf_size, + create=False + ) + self._write_event = EventFD(token.write_eventfd, 'w') + self._wrap_event = EventFD(token.wrap_eventfd, 'r') + self._ptr = start_ptr + self._flags = flags + + @property + def key(self) -> str: + return self._shm.name + + @property + def size(self) -> int: + return self._shm.size + + @property + def ptr(self) -> int: + return self._ptr + + @property + def write_fd(self) -> int: + return self._write_event.fd + + @property + def wrap_fd(self) -> int: + return self._wrap_event.fd + + async def receive_some( + self, + max_bytes: int | None = None, + nb_timeout: float = 0.1 + ) -> memoryview: + # if non blocking eventfd enabled, do polling + # until next write, this allows signal handling + if self._flags | EFD_NONBLOCK: + delta = None + while delta is None: + try: + delta = await self._write_event.read() + + except OSError as e: + if e.errno == 'EAGAIN': + continue + + raise e + + else: + delta = await self._write_event.read() + + # fetch next segment and advance ptr + next_ptr = self._ptr + delta + segment = self._shm.buf[self._ptr:next_ptr] + self._ptr = next_ptr + + if self.ptr == self.size: + # reached the end, signal wrap around + self._ptr = 0 + self._wrap_event.write(1) + + return segment + + async def aclose(self): + self._write_event.close() + self._wrap_event.close() + self._shm.close() + + async def __aenter__(self): + self._write_event.open() + self._wrap_event.open() + return self diff --git a/tractor/ipc/_server.py b/tractor/ipc/_server.py new file mode 100644 index 00000000..55374b0a --- /dev/null +++ b/tractor/ipc/_server.py @@ -0,0 +1,1186 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +''' +High-level "IPC server" encapsulation for all your +multi-transport-protcol needs! + +''' +from __future__ import annotations +from collections import defaultdict +from contextlib import ( + asynccontextmanager as acm, +) +from functools import partial +from itertools import chain +import inspect +import textwrap +from types import ( + ModuleType, +) +from typing import ( + Callable, + TYPE_CHECKING, +) + +import trio +from trio import ( + EventStatistics, + Nursery, + TaskStatus, + SocketListener, +) + +from ..devx.pformat import ( + ppfmt, + nest_from_op, +) +from .._exceptions import ( + TransportClosed, +) +from .. import _rpc +from ..msg import ( + MsgType, + Struct, + types as msgtypes, +) +from ..trionics import maybe_open_nursery +from .. 
import ( + _state, + log, +) +from .._addr import Address +from ._chan import Channel +from ._transport import MsgTransport +from ._uds import UDSAddress +from ._tcp import TCPAddress + +if TYPE_CHECKING: + from .._runtime import Actor + from .._supervise import ActorNursery + + +log = log.get_logger(__name__) + + +async def maybe_wait_on_canced_subs( + uid: tuple[str, str], + chan: Channel, + disconnected: bool, + + actor: Actor|None = None, + chan_drain_timeout: float = 0.5, + an_exit_timeout: float = 0.5, + +) -> ActorNursery|None: + ''' + When a process-local actor-nursery is found for the given actor + `uid` (i.e. that peer is **also** a subactor of this parent), we + attempt to (with timeouts) wait on, + + - all IPC msgs to drain on the (common) `Channel` such that all + local `Context`-parent-tasks can also gracefully collect + `ContextCancelled` msgs from their respective remote children + vs. a `chan_drain_timeout`. + + - the actor-nursery to cancel-n-join all its supervised children + (processes) *gracefully* vs. a `an_exit_timeout` and thus also + detect cases where the IPC transport connection broke but + a sub-process is detected as still alive (a case that happens + when the subactor is still in an active debugger REPL session). + + If the timeout expires in either case we ofc report with warning. + + ''' + actor = actor or _state.current_actor() + + # XXX running outside actor-runtime usage, + # - unit testing + # - possibly manual usage (eventually) ? + if not actor: + return None + + local_nursery: ( + ActorNursery|None + ) = actor._actoruid2nursery.get(uid) + + # This is set in `Portal.cancel_actor()`. So if + # the peer was cancelled we try to wait for them + # to tear down their side of the connection before + # moving on with closing our own side. + if ( + local_nursery + and ( + actor._cancel_called + or + chan._cancel_called + ) + # + # ^-TODO-^ along with this is there another condition + # that we should filter with to avoid entering this + # waiting block needlessly? + # -[ ] maybe `and local_nursery.cancelled` and/or + # only if the `._children` table is empty or has + # only `Portal`s with .chan._cancel_called == + # True` as per what we had below; the MAIN DIFF + # BEING that just bc one `Portal.cancel_actor()` + # was called, doesn't mean the whole actor-nurse + # is gonna exit any time soon right!? + # + # or + # all(chan._cancel_called for chan in chans) + + ): + log.cancel( + 'Waiting on cancel request to peer\n' + f'c)=> {chan.aid.reprol()}@[{chan.maddr}]\n' + ) + + # XXX: this is a soft wait on the channel (and its + # underlying transport protocol) to close from the + # remote peer side since we presume that any channel + # which is mapped to a sub-actor (i.e. it's managed + # by local actor-nursery) has a message that is sent + # to the peer likely by this actor (which may be in + # a shutdown sequence due to cancellation) when the + # local runtime here is now cancelled while + # (presumably) in the middle of msg loop processing. + chan_info: str = ( + f'{chan.aid}\n' + f'|_{chan}\n' + f' |_{chan.transport}\n\n' + ) + with trio.move_on_after(chan_drain_timeout) as drain_cs: + drain_cs.shield = True + + # attempt to wait for the far end to close the + # channel and bail after timeout (a 2-generals + # problem on closure). + assert chan.transport + async for msg in chan.transport.drain(): + + # try to deliver any lingering msgs + # before we destroy the channel. 
+ # This accomplishes deterministic + # ``Portal.cancel_actor()`` cancellation by + # making sure any RPC response to that call is + # delivered the local calling task. + # TODO: factor this into a helper? + log.warning( + 'Draining msg from disconnected peer\n' + f'{chan_info}' + f'{ppfmt(msg)}\n' + ) + # cid: str|None = msg.get('cid') + cid: str|None = msg.cid + if cid: + # deliver response to local caller/waiter + await actor._deliver_ctx_payload( + chan, + cid, + msg, + ) + if drain_cs.cancelled_caught: + log.warning( + 'Timed out waiting on IPC transport channel to drain?\n' + f'{chan_info}' + ) + + # XXX NOTE XXX when no explicit call to + # `open_root_actor()` was made by the application + # (normally we implicitly make that call inside + # the first `.open_nursery()` in root-actor + # user/app code), we can assume that either we + # are NOT the root actor or are root but the + # runtime was started manually. and thus DO have + # to wait for the nursery-enterer to exit before + # shutting down the local runtime to avoid + # clobbering any ongoing subactor + # teardown/debugging/graceful-cancel. + # + # see matching note inside `._supervise.open_nursery()` + # + # TODO: should we have a separate cs + timeout + # block here? + if ( + # XXX SO either, + # - not root OR, + # - is root but `open_root_actor()` was + # entered manually (in which case we do + # the equiv wait there using the + # `devx.debug` sub-sys APIs). + not local_nursery._implicit_runtime_started + ): + log.runtime( + 'Waiting on local actor nursery to exit..\n' + f'|_{local_nursery}\n' + ) + with trio.move_on_after(an_exit_timeout) as an_exit_cs: + an_exit_cs.shield = True + await local_nursery.exited.wait() + + # TODO: currently this is always triggering for every + # sub-daemon spawned from the `piker.services._mngr`? + # -[ ] how do we ensure that the IPC is supposed to + # be long lived and isn't just a register? + # |_ in the register case how can we signal that the + # ephemeral msg loop was intentional? + if ( + # not local_nursery._implicit_runtime_started + # and + an_exit_cs.cancelled_caught + ): + report: str = ( + 'Timed out waiting on local actor-nursery to exit?\n' + f'c)>\n' + f' |_{local_nursery}\n' + ) + if children := local_nursery._children: + # indent from above local-nurse repr + report += ( + f' |_{ppfmt(children)}\n' + ) + + log.warning(report) + + if disconnected: + # if the transport died and this actor is still + # registered within a local nursery, we report + # that the IPC layer may have failed + # unexpectedly since it may be the cause of + # other downstream errors. + entry: tuple|None = local_nursery._children.get(uid) + if entry: + proc: trio.Process + _, proc, _ = entry + + if ( + (poll := getattr(proc, 'poll', None)) + and + poll() is None # proc still alive + ): + # TODO: change log level based on + # detecting whether chan was created for + # ephemeral `.register_actor()` request! + # -[ ] also, that should be avoidable by + # re-using any existing chan from the + # `._discovery.get_registry()` call as + # well.. + log.runtime( + f'Peer IPC broke but subproc is alive?\n\n' + + f'<=x {chan.aid.reprol()}@[{chan.maddr}]\n' + f'\n' + f'{proc}\n' + ) + + return local_nursery + +# TODO multi-tpt support with per-proto peer tracking? +# +# -[x] maybe change to mod-func and rename for implied +# multi-transport semantics? +# -[ ] register each stream/tpt/chan with the owning `Endpoint` +# so that we can query per tpt all peer contact infos? 
+# |_[ ] possibly provide a global viewing via a +# `collections.ChainMap`? +# +async def handle_stream_from_peer( + stream: trio.SocketStream, + + *, + server: IPCServer, + +) -> None: + ''' + Top-level `trio.abc.Stream` (i.e. normally `trio.SocketStream`) + handler-callback as spawn-invoked by `trio.serve_listeners()`. + + Note that each call to this handler is as a spawned task inside + any `IPCServer.listen_on()` passed `stream_handler_tn: Nursery` + such that it is invoked as, + + Endpoint.stream_handler_tn.start_soon( + handle_stream, + stream, + ) + + ''' + server._no_more_peers = trio.Event() # unset by making new + + # TODO, debug_mode tooling for when hackin this lower layer? + # with debug.maybe_open_crash_handler( + # pdb=True, + # ) as boxerr: + + chan = Channel.from_stream(stream) + con_status: str = ( + f'New inbound IPC transport connection\n' + f'<=( {stream!r}\n' + ) + con_status_steps: str = '' + + # initial handshake with peer phase + try: + if actor := _state.current_actor(): + peer_aid: msgtypes.Aid = await chan._do_handshake( + aid=actor.aid, + ) + except ( + TransportClosed, + # ^XXX NOTE, the above wraps `trio` exc types raised + # during various `SocketStream.send/receive_xx()` calls + # under different fault conditions such as, + # + # trio.BrokenResourceError, + # trio.ClosedResourceError, + # + # Inside our `.ipc._transport` layer we absorb and + # re-raise our own `TransportClosed` exc such that this + # higher level runtime code can only worry one + # "kinda-error" that we expect to tolerate during + # discovery-sys related pings, queires, DoS etc. + ): + # XXX: This may propagate up from `Channel._aiter_recv()` + # and `MsgpackStream._inter_packets()` on a read from the + # stream particularly when the runtime is first starting up + # inside `open_root_actor()` where there is a check for + # a bound listener on the "arbiter" addr. the reset will be + # because the handshake was never meant took place. + log.runtime( + con_status + + + ' -> But failed to handshake? Ignoring..\n' + ) + return + + uid: tuple[str, str] = ( + peer_aid.name, + peer_aid.uuid, + ) + # TODO, can we make this downstream peer tracking use the + # `peer_aid` instead? + familiar: str = 'new-peer' + if _pre_chan := server._peers.get(uid): + familiar: str = 'pre-existing-peer' + uid_short: str = f'{uid[0]}[{uid[1][-6:]}]' + con_status_steps += ( + f' -> Handshake with {familiar} `{uid_short}` complete\n' + ) + + if _pre_chan: + # con_status += ( + # ^TODO^ swap once we minimize conn duplication + # -[ ] last thing might be reg/unreg runtime reqs? + # log.warning( + log.debug( + f'?Wait?\n' + f'We already have IPC with peer {uid_short!r}\n' + f'|_{_pre_chan}\n' + ) + + # IPC connection tracking for both peers and new children: + # - if this is a new channel to a locally spawned + # sub-actor there will be a spawn wait even registered + # by a call to `.wait_for_peer()`. + # - if a peer is connecting no such event will exit. 
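+    # (docs sketch) the counterpart registration happens in
+    # `Server.wait_for_peer()` (defined further below), roughly:
+    #
+    #   event = self._peer_connected.setdefault(uid, trio.Event())
+    #   await event.wait()
+    #
+    # so the pop-and-`.set()` sequence here wakes that waiter
+    # exactly once per connect-back.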
+ event: trio.Event|None = server._peer_connected.pop( + uid, + None, + ) + if event: + con_status_steps += ( + ' -> Waking subactor spawn waiters: ' + f'{event.statistics().tasks_waiting}\n' + f' -> Registered IPC chan for child actor {uid}@{chan.raddr}\n' + # f' {event}\n' + # f' |{event.statistics()}\n' + ) + # wake tasks waiting on this IPC-transport "connect-back" + event.set() + + else: + con_status_steps += ( + f' -> Registered IPC chan for peer actor {uid}@{chan.raddr}\n' + ) # type: ignore + + chans: list[Channel] = server._peers[uid] + # if chans: + # # TODO: re-use channels for new connections instead + # # of always new ones? + # # => will require changing all the discovery funcs.. + + # append new channel + # TODO: can we just use list-ref directly? + chans.append(chan) + + con_status_steps += ' -> Entering RPC msg loop..\n' + log.runtime( + con_status + + + textwrap.indent( + con_status_steps, + prefix=' '*3, # align to first-ln + ) + ) + + # Begin channel management - respond to remote requests and + # process received reponses. + disconnected: bool = False + last_msg: MsgType + try: + ( + disconnected, + last_msg, + ) = await _rpc.process_messages( + chan=chan, + ) + except trio.Cancelled: + log.cancel( + 'IPC transport msg loop was cancelled\n' + f'c)>\n' + f' |_{chan}\n' + ) + raise + + finally: + + # check if there are subs which we should gracefully join at + # both the inter-actor-task and subprocess levels to + # gracefully remote cancel and later disconnect (particularly + # for permitting subs engaged in active debug-REPL sessions). + local_nursery: ActorNursery|None = await maybe_wait_on_canced_subs( + uid=uid, + chan=chan, + disconnected=disconnected, + ) + + # `Channel` teardown and closure sequence + # drop ref to channel so it can be gc-ed and disconnected + # + # -[x]TODO mk this be like + # <=x Channel( + # |_field: blah + # )> + op_repr: str = '<=x ' + chan_repr: str = nest_from_op( + input_op=op_repr, + op_suffix='', + nest_prefix='', + text=chan.pformat(), + nest_indent=len(op_repr)-1, + rm_from_first_ln='<', + ) + + con_teardown_status: str = ( + f'IPC channel disconnect\n' + f'\n' + f'{chan_repr}\n' + f'\n' + ) + + chans.remove(chan) + + # TODO: do we need to be this pedantic? + if not chans: + con_teardown_status += ( + f'-> No more channels with {chan.aid.reprol()!r}\n' + ) + server._peers.pop(uid, None) + + if peers := list(server._peers.values()): + peer_cnt: int = len(peers) + if ( + (first := peers[0][0]) is not chan + and + not disconnected + and + peer_cnt > 1 + ): + con_teardown_status += ( + f'-> Remaining IPC {peer_cnt-1!r} peers:\n' + ) + for chans in server._peers.values(): + first: Channel = chans[0] + if not ( + first is chan + and + disconnected + ): + con_teardown_status += ( + f' |_{first.aid.reprol()!r} -> {len(chans)!r} chans\n' + ) + + # No more channels to other actors (at all) registered + # as connected. + if not server._peers: + con_teardown_status += ( + '-> Signalling no more peer connections!\n' + ) + server._no_more_peers.set() + + # NOTE: block this actor from acquiring the + # debugger-TTY-lock since we have no way to know if we + # cancelled it and further there is no way to ensure the + # lock will be released if acquired due to having no + # more active IPC channels. + if ( + _state.is_root_process() + and + _state.is_debug_mode() + ): + from ..devx import debug + pdb_lock = debug.Lock + pdb_lock._blocked.add(uid) + + # TODO: NEEEDS TO BE TESTED! + # actually, no idea if this ever even enters.. 
XD + # + # XXX => YES IT DOES, when i was testing ctl-c + # from broken debug TTY locking due to + # msg-spec races on application using RunVar... + if ( + local_nursery + and + (ctx_in_debug := pdb_lock.ctx_in_debug) + and + (pdb_user_uid := ctx_in_debug.chan.aid) + ): + entry: tuple|None = local_nursery._children.get( + tuple(pdb_user_uid) + ) + if entry: + proc: trio.Process + _, proc, _ = entry + + if ( + (poll := getattr(proc, 'poll', None)) + and poll() is None + ): + log.cancel( + 'Root actor reports no-more-peers, BUT\n' + 'a DISCONNECTED child still has the debug ' + 'lock!\n\n' + # f'root uid: {actor.uid}\n' + f'last disconnected child uid: {uid}\n' + f'locking child uid: {pdb_user_uid}\n' + ) + await debug.maybe_wait_for_debugger( + child_in_debug=True + ) + + # TODO: just bc a child's transport dropped + # doesn't mean it's not still using the pdb + # REPL! so, + # -[ ] ideally we can check out child proc + # tree to ensure that its alive (and + # actually using the REPL) before we cancel + # it's lock acquire by doing the below! + # -[ ] create a way to read the tree of each actor's + # grandchildren such that when an + # intermediary parent is cancelled but their + # child has locked the tty, the grandparent + # will not allow the parent to cancel or + # zombie reap the child! see open issue: + # - https://github.com/goodboy/tractor/issues/320 + # ------ - ------ + # if a now stale local task has the TTY lock still + # we cancel it to allow servicing other requests for + # the lock. + if ( + (db_cs := pdb_lock.get_locking_task_cs()) + and not db_cs.cancel_called + and uid == pdb_user_uid + ): + log.critical( + f'STALE DEBUG LOCK DETECTED FOR {uid}' + ) + # TODO: figure out why this breaks tests.. + db_cs.cancel() + + log.runtime(con_teardown_status) + # finally block closure + + +class Endpoint(Struct): + ''' + An instance of an IPC "bound" address where the lifetime of an + "ability to accept connections" and handle the subsequent + sequence-of-packets (maybe oriented as sessions) is determined by + the underlying nursery scope(s). + + ''' + addr: Address + listen_tn: Nursery + stream_handler_tn: Nursery|None = None + + # NOTE, normally filled in by calling `.start_listener()` + _listener: SocketListener|None = None + + # ?TODO, mk stream_handler hook into this ep instance so that we + # always keep track of all `SocketStream` instances per + # listener/ep? + peer_tpts: dict[ + UDSAddress|TCPAddress, # peer addr + MsgTransport, # handle to encoded-msg transport stream + ] = {} + + def pformat( + self, + indent: int = 0, + privates: bool = False, + ) -> str: + type_repr: str = type(self).__name__ + fmtstr: str = ( + # !TODO, always be ns aware! + # f'|_netns: {netns}\n' + f' |.addr: {self.addr!r}\n' + f' |_peers: {len(self.peer_tpts)}\n' + ) + return ( + f'<{type_repr}(\n' + f'{fmtstr}' + f')>' + ) + + async def start_listener(self) -> SocketListener: + tpt_mod: ModuleType = inspect.getmodule(self.addr) + lstnr: SocketListener = await tpt_mod.start_listener( + addr=self.addr, + ) + + # NOTE, for handling the resolved non-0 port for + # TCP/UDP network sockets. + if ( + (unwrapped := lstnr.socket.getsockname()) + != + self.addr.unwrap() + ): + self.addr=self.addr.from_addr(unwrapped) + + self._listener = lstnr + return lstnr + + def close_listener( + self, + ) -> bool: + tpt_mod: ModuleType = inspect.getmodule(self.addr) + closer: Callable = getattr( + tpt_mod, + 'close_listener', + False, + ) + # when no defined closing is implicit! 
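+        # (docs note) ie. a transport module *may* export an optional
+        # `close_listener(addr, lstnr)` hook (hence the `getattr`
+        # default of `False` above); when it doesn't, the listener is
+        # presumed to be closed implicitly, eg. by the owning
+        # `.listen_tn`/`trio.serve_listeners()` teardown.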
+ if not closer: + return True + return closer( + addr=self.addr, + lstnr=self._listener, + ) + + +class Server(Struct): + _parent_tn: Nursery + _stream_handler_tn: Nursery + + # level-triggered sig for whether "no peers are currently + # connected"; field is **always** set to an instance but + # initialized with `.is_set() == True`. + _no_more_peers: trio.Event + + # active eps as allocated by `.listen_on()` + _endpoints: list[Endpoint] = [] + + # connection tracking & mgmt + _peers: defaultdict[ + str, # uaid + list[Channel], # IPC conns from peer + ] = defaultdict(list) + + # events-table with entries registered unset while the local + # actor is waiting on a new actor to inbound connect, often + # a parent waiting on its child just after spawn. + _peer_connected: dict[ + tuple[str, str], + trio.Event, + ] = {} + + # syncs for setup/teardown sequences + # - null when not yet booted, + # - unset when active, + # - set when fully shutdown with 0 eps active. + _shutdown: trio.Event|None = None + + # TODO, maybe just make `._endpoints: list[Endpoint]` and + # provide dict-views onto it? + # @property + # def addrs2eps(self) -> dict[Address, Endpoint]: + # ... + @property + def proto_keys(self) -> list[str]: + return [ + ep.addr.proto_key + for ep in self._endpoints + ] + + # def cancel_server(self) -> bool: + def cancel( + self, + + # !TODO, suport just shutting down accepting new clients, + # not existing ones! + # only_listeners: str|None = None + + ) -> bool: + ''' + Cancel this IPC transport server nursery thereby + preventing any new inbound IPC connections establishing. + + ''' + if self._parent_tn: + # TODO: obvi a different server type when we eventually + # support some others XD + log.runtime( + f'Cancelling server(s) for tpt-protos\n' + f'{self.proto_keys!r}\n' + ) + self._parent_tn.cancel_scope.cancel() + return True + + log.warning( + 'No IPC server started before cancelling ?' + ) + return False + + async def wait_for_shutdown( + self, + ) -> bool: + if self._shutdown is not None: + await self._shutdown.wait() + else: + tpt_protos: list[str] = [] + ep: Endpoint + for ep in self._endpoints: + tpt_protos.append(ep.addr.proto_key) + + log.warning( + 'Transport server(s) may have been cancelled before started?\n' + f'protos: {tpt_protos!r}\n' + ) + + def len_peers( + self, + ) -> int: + return len([ + chan.connected() + for chan in chain(*self._peers.values()) + ]) + + def has_peers( + self, + check_chans: bool = False, + ) -> bool: + ''' + Predicate for "are there any active peer IPC `Channel`s at the moment?" + + ''' + has_peers: bool = not self._no_more_peers.is_set() + if ( + has_peers + and + check_chans + and + (peer_cnt := self.len_peers()) + ): + has_peers: bool = ( + peer_cnt > 0 + and + has_peers + ) + + return has_peers + + async def wait_for_no_more_peers( + self, + # XXX, should this even be allowed? + # -> i've seen it cause hangs on teardown + # in `test_resource_cache.py` + # _shield: bool = False, + ) -> None: + await self._no_more_peers.wait() + # with trio.CancelScope(shield=_shield): + # await self._no_more_peers.wait() + + async def wait_for_peer( + self, + uid: tuple[str, str], + + ) -> tuple[trio.Event, Channel]: + ''' + Wait for a connection back from a (spawned sub-)actor with + a `uid` using a `trio.Event`. + + Returns a pair of the event and the "last" registered IPC + `Channel` for the peer with `uid`. 
+ + ''' + log.debug(f'Waiting for peer {uid!r} to connect') + event: trio.Event = self._peer_connected.setdefault( + uid, + trio.Event(), + ) + await event.wait() + log.debug(f'{uid!r} successfully connected back to us') + mru_chan: Channel = self._peers[uid][-1] + return ( + event, + mru_chan, + ) + + @property + def addrs(self) -> list[Address]: + return [ep.addr for ep in self._endpoints] + + @property + def accept_addrs(self) -> list[str, str|int]: + ''' + The `list` of `Address.unwrap()`-ed active IPC endpoint addrs. + + ''' + return [ep.addr.unwrap() for ep in self._endpoints] + + def epsdict(self) -> dict[ + Address, + Endpoint, + ]: + return { + ep.addr: ep + for ep in self._endpoints + } + + def is_shutdown(self) -> bool: + if (ev := self._shutdown) is None: + return False + + return ev.is_set() + + @property + def repr_state(self) -> str: + ''' + A `str`-status describing the current state of this + IPC server in terms of the current operating "phase". + + ''' + status = 'server is active' + if self.has_peers(): + peer_cnt: int = self.len_peers() + status: str = ( + f'{peer_cnt!r} peer chans' + ) + else: + status: str = 'No peer chans' + + if self.is_shutdown(): + status: str = 'server-shutdown' + + return status + + def pformat( + self, + privates: bool = False, + ) -> str: + eps: list[Endpoint] = self._endpoints + + # state_repr: str = ( + # f'{len(eps)!r} endpoints active' + # ) + fmtstr = ( + f' |_state: {self.repr_state!r}\n' + ) + if privates: + fmtstr += f' no_more_peers: {self.has_peers()}\n' + + if self._shutdown is not None: + shutdown_stats: EventStatistics = self._shutdown.statistics() + fmtstr += ( + f' task_waiting_on_shutdown: {shutdown_stats}\n' + ) + + if eps := self._endpoints: + addrs: list[tuple] = [ + ep.addr for ep in eps + ] + repr_eps: str = ppfmt(addrs) + + fmtstr += ( + f' |_endpoints: {repr_eps}\n' + # ^TODO? how to indent closing ']'.. + ) + + if peers := self._peers: + fmtstr += ( + f' |_peers: {len(peers)} connected\n' + ) + + return ( + f'\n' + ) + + __repr__ = pformat + + # TODO? maybe allow shutting down a `.listen_on()`s worth of + # listeners by cancelling the corresponding + # `Endpoint._listen_tn` only ? + # -[ ] in theory you could use this to + # "boot-and-wait-for-reconnect" of all current and connecting + # peers? + # |_ would require that the stream-handler is intercepted so we + # can intercept every `MsgTransport` (stream) and track per + # `Endpoint` likely? + # + # async def unlisten( + # self, + # listener: SocketListener, + # ) -> bool: + # ... + + async def listen_on( + self, + *, + accept_addrs: list[tuple[str, int|str]]|None = None, + stream_handler_nursery: Nursery|None = None, + ) -> list[Endpoint]: + ''' + Start `SocketListeners` (i.e. bind and call `socket.listen()`) + for all IPC-transport-protocol specific `Address`-types + in `accept_addrs`. 
+ + ''' + from .._addr import ( + default_lo_addrs, + wrap_address, + ) + if accept_addrs is None: + accept_addrs = default_lo_addrs([ + _state._def_tpt_proto + ]) + + else: + accept_addrs: list[Address] = [ + wrap_address(a) for a in accept_addrs + ] + + if self._shutdown is None: + self._shutdown = trio.Event() + + elif self.is_shutdown(): + raise RuntimeError( + f'IPC server has already terminated ?\n' + f'{self}\n' + ) + + log.runtime( + f'Binding endpoints\n' + f'{ppfmt(accept_addrs)}\n' + ) + eps: list[Endpoint] = await self._parent_tn.start( + partial( + _serve_ipc_eps, + server=self, + stream_handler_tn=( + stream_handler_nursery + or + self._stream_handler_tn + ), + listen_addrs=accept_addrs, + ) + ) + self._endpoints.extend(eps) + + serv_repr: str = nest_from_op( + input_op='(>', + text=self.pformat(), + nest_indent=1, + ) + log.runtime( + f'Started IPC server\n' + f'{serv_repr}' + ) + + # XXX, a little sanity on new ep allocations + group_tn: Nursery|None = None + ep: Endpoint + for ep in eps: + if ep.addr not in self.addrs: + breakpoint() + + if group_tn is None: + group_tn = ep.listen_tn + else: + assert group_tn is ep.listen_tn + + return eps + + +# alias until we decide on final naming +IPCServer = Server + + +async def _serve_ipc_eps( + *, + server: IPCServer, + stream_handler_tn: Nursery, + listen_addrs: list[tuple[str, int|str]], + + task_status: TaskStatus[ + Nursery, + ] = trio.TASK_STATUS_IGNORED, +) -> None: + ''' + Start IPC transport server(s) for the actor, begin + listening/accepting new `trio.SocketStream` connections + from peer actors via a `SocketListener`. + + This will cause an actor to continue living (and thus + blocking at the process/OS-thread level) until + `.cancel_server()` is called. + + ''' + try: + listen_tn: Nursery + async with trio.open_nursery() as listen_tn: + + eps: list[Endpoint] = [] + # XXX NOTE, required to call `serve_listeners()` below. + # ?TODO, maybe just pass `list(eps.values()` tho? + listeners: list[trio.abc.Listener] = [] + for addr in listen_addrs: + ep = Endpoint( + addr=addr, + listen_tn=listen_tn, + stream_handler_tn=stream_handler_tn, + ) + try: + ep_sclang: str = nest_from_op( + input_op='>[', + text=f'{ep.pformat()}', + ) + log.runtime( + f'Starting new endpoint listener\n' + f'{ep_sclang}\n' + ) + listener: trio.abc.Listener = await ep.start_listener() + assert listener is ep._listener + # actor = _state.current_actor() + # if actor.is_registry: + # import pdbp; pdbp.set_trace() + + except OSError as oserr: + if ( + '[Errno 98] Address already in use' + in + oserr.args#[0] + ): + log.exception( + f'Address already in use?\n' + f'{addr}\n' + ) + raise + + listeners.append(listener) + eps.append(ep) + + _listeners: list[SocketListener] = await listen_tn.start( + partial( + trio.serve_listeners, + handler=partial( + handle_stream_from_peer, + server=server, + ), + listeners=listeners, + + # NOTE: configured such that new + # connections will stay alive even if + # this server is cancelled! + handler_nursery=stream_handler_tn + ) + ) + task_status.started( + eps, + ) + + finally: + if eps: + addr: Address + ep: Endpoint + for addr, ep in server.epsdict().items(): + ep.close_listener() + server._endpoints.remove(ep) + + # actor = _state.current_actor() + # if actor.is_arbiter: + # import pdbp; pdbp.set_trace() + + # signal the server is "shutdown"/"terminated" + # since no more active endpoints are active. 
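+        # (docs note) this `.set()` is what unblocks any task parked
+        # in `Server.wait_for_shutdown()`, which simply does
+        # `await self._shutdown.wait()` whenever the event was ever
+        # allocated by a prior `.listen_on()` call.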
+ if not server._endpoints: + server._shutdown.set() + +@acm +async def open_ipc_server( + parent_tn: Nursery|None = None, + stream_handler_tn: Nursery|None = None, + +) -> IPCServer: + + async with maybe_open_nursery( + nursery=parent_tn, + ) as parent_tn: + no_more_peers = trio.Event() + no_more_peers.set() + + ipc_server = IPCServer( + _parent_tn=parent_tn, + _stream_handler_tn=( + stream_handler_tn + or + parent_tn + ), + _no_more_peers=no_more_peers, + ) + try: + yield ipc_server + log.runtime( + 'Server-tn running until terminated\n' + ) + # TODO? when if ever would we want/need this? + # with trio.CancelScope(shield=True): + # await ipc_server.wait_for_shutdown() + + except BaseException as berr: + log.exception( + 'IPC server caller crashed ??' + ) + # ?TODO, maybe we can ensure the endpoints are torndown + # (and thus their managed listeners) beforehand to ensure + # super graceful RPC mechanics? + # + # -[ ] but aren't we doing that already per-`listen_tn` + # inside `_serve_ipc_eps()` above? + # + # ipc_server.cancel() + raise berr diff --git a/tractor/ipc/_shm.py b/tractor/ipc/_shm.py new file mode 100644 index 00000000..2360f893 --- /dev/null +++ b/tractor/ipc/_shm.py @@ -0,0 +1,825 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +""" +SC friendly shared memory management geared at real-time +processing. + +Support for ``numpy`` compatible array-buffers is provided but is +considered optional within the context of this runtime-library. + +""" +from __future__ import annotations +from multiprocessing import shared_memory as shm +from multiprocessing.shared_memory import ( + # SharedMemory, + ShareableList, +) +import platform +from sys import byteorder +import time +from typing import Optional + +from msgspec import ( + Struct, + to_builtins +) +import tractor + +from tractor.ipc._mp_bs import disable_mantracker +from tractor.log import get_logger + + +_USE_POSIX = getattr(shm, '_USE_POSIX', False) +if _USE_POSIX: + from _posixshmem import shm_unlink + + +try: + import numpy as np + from numpy.lib import recfunctions as rfn + # TODO ruff complains with, + # warning| F401: `nptyping` imported but unused; consider using + # `importlib.util.find_spec` to test for availability + import nptyping # noqa +except ImportError: + pass + + +log = get_logger(__name__) + + +SharedMemory = disable_mantracker() + + +class SharedInt: + ''' + Wrapper around a single entry shared memory array which + holds an ``int`` value used as an index counter. 
+ + ''' + def __init__( + self, + shm: SharedMemory, + ) -> None: + self._shm = shm + + @property + def value(self) -> int: + return int.from_bytes(self._shm.buf, byteorder) + + @value.setter + def value(self, value) -> None: + self._shm.buf[:] = value.to_bytes(self._shm.size, byteorder) + + def destroy(self) -> None: + if _USE_POSIX: + # We manually unlink to bypass all the "resource tracker" + # nonsense meant for non-SC systems. + name = self._shm.name + try: + shm_unlink(name) + except FileNotFoundError: + # might be a teardown race here? + log.warning(f'Shm for {name} already unlinked?') + + +class NDToken(Struct, frozen=True): + ''' + Internal represenation of a shared memory ``numpy`` array "token" + which can be used to key and load a system (OS) wide shm entry + and correctly read the array by type signature. + + This type is msg safe. + + ''' + shm_name: str # this servers as a "key" value + shm_first_index_name: str + shm_last_index_name: str + dtype_descr: tuple + size: int # in struct-array index / row terms + + # TODO: use nptyping here on dtypes + @property + def dtype(self) -> list[tuple[str, str, tuple[int, ...]]]: + return np.dtype( + list( + map(tuple, self.dtype_descr) + ) + ).descr + + def as_msg(self): + return to_builtins(self) + + @classmethod + def from_msg(cls, msg: dict) -> NDToken: + if isinstance(msg, NDToken): + return msg + + # TODO: native struct decoding + # return _token_dec.decode(msg) + + msg['dtype_descr'] = tuple(map(tuple, msg['dtype_descr'])) + return NDToken(**msg) + + +# _token_dec = msgspec.msgpack.Decoder(NDToken) + +# TODO: this api? +# _known_tokens = tractor.ActorVar('_shm_tokens', {}) +# _known_tokens = tractor.ContextStack('_known_tokens', ) +# _known_tokens = trio.RunVar('shms', {}) + +# TODO: this should maybe be provided via +# a `.trionics.maybe_open_context()` wrapper factory? +# process-local store of keys to tokens +_known_tokens: dict[str, NDToken] = {} + + +def get_shm_token(key: str) -> NDToken | None: + ''' + Convenience func to check if a token + for the provided key is known by this process. + + Returns either the ``numpy`` token or a string for a shared list. + + ''' + return _known_tokens.get(key) + + +def _make_token( + key: str, + size: int, + dtype: np.dtype, + +) -> NDToken: + ''' + Create a serializable token that can be used + to access a shared array. + + ''' + return NDToken( + shm_name=key, + shm_first_index_name=key + "_first", + shm_last_index_name=key + "_last", + dtype_descr=tuple(np.dtype(dtype).descr), + size=size, + ) + + +class ShmArray: + ''' + A shared memory ``numpy.ndarray`` API. + + An underlying shared memory buffer is allocated based on + a user specified ``numpy.ndarray``. This fixed size array + can be read and written to by pushing data both onto the "front" + or "back" of a set index range. The indexes for the "first" and + "last" index are themselves stored in shared memory (accessed via + ``SharedInt`` interfaces) values such that multiple processes can + interact with the same array using a synchronized-index. 
+ + ''' + def __init__( + self, + shmarr: np.ndarray, + first: SharedInt, + last: SharedInt, + shm: SharedMemory, + # readonly: bool = True, + ) -> None: + self._array = shmarr + + # indexes for first and last indices corresponding + # to fille data + self._first = first + self._last = last + + self._len = len(shmarr) + self._shm = shm + self._post_init: bool = False + + # pushing data does not write the index (aka primary key) + self._write_fields: list[str] | None = None + dtype = shmarr.dtype + if dtype.fields: + self._write_fields = list(shmarr.dtype.fields.keys())[1:] + + # TODO: ringbuf api? + + @property + def _token(self) -> NDToken: + return NDToken( + shm_name=self._shm.name, + shm_first_index_name=self._first._shm.name, + shm_last_index_name=self._last._shm.name, + dtype_descr=tuple(self._array.dtype.descr), + size=self._len, + ) + + @property + def token(self) -> dict: + """Shared memory token that can be serialized and used by + another process to attach to this array. + """ + return self._token.as_msg() + + @property + def index(self) -> int: + return self._last.value % self._len + + @property + def array(self) -> np.ndarray: + ''' + Return an up-to-date ``np.ndarray`` view of the + so-far-written data to the underlying shm buffer. + + ''' + a = self._array[self._first.value:self._last.value] + + # first, last = self._first.value, self._last.value + # a = self._array[first:last] + + # TODO: eventually comment this once we've not seen it in the + # wild in a long time.. + # XXX: race where first/last indexes cause a reader + # to load an empty array.. + if len(a) == 0 and self._post_init: + raise RuntimeError('Empty array race condition hit!?') + # breakpoint() + + return a + + def ustruct( + self, + fields: Optional[list[str]] = None, + + # type that all field values will be cast to + # in the returned view. + common_dtype: np.dtype = float, + + ) -> np.ndarray: + + array = self._array + + if fields: + selection = array[fields] + # fcount = len(fields) + else: + selection = array + # fcount = len(array.dtype.fields) + + # XXX: manual ``.view()`` attempt that also doesn't work. + # uview = selection.view( + # dtype=' np.ndarray: + ''' + Return the last ``length``'s worth of ("row") entries from the + array. + + ''' + return self.array[-length:] + + def push( + self, + data: np.ndarray, + + field_map: Optional[dict[str, str]] = None, + prepend: bool = False, + update_first: bool = True, + start: int | None = None, + + ) -> int: + ''' + Ring buffer like "push" to append data + into the buffer and return updated "last" index. + + NB: no actual ring logic yet to give a "loop around" on overflow + condition, lel. + + ''' + length = len(data) + + if prepend: + index = (start or self._first.value) - length + + if index < 0: + raise ValueError( + f'Array size of {self._len} was overrun during prepend.\n' + f'You have passed {abs(index)} too many datums.' + ) + + else: + index = start if start is not None else self._last.value + + end = index + length + + if field_map: + src_names, dst_names = zip(*field_map.items()) + else: + dst_names = src_names = self._write_fields + + try: + self._array[ + list(dst_names) + ][index:end] = data[list(src_names)][:] + + # NOTE: there was a race here between updating + # the first and last indices and when the next reader + # tries to access ``.array`` (which due to the index + # overlap will be empty). Pretty sure we've fixed it now + # but leaving this here as a reminder. 
+ if ( + prepend + and update_first + and length + ): + assert index < self._first.value + + if ( + index < self._first.value + and update_first + ): + assert prepend, 'prepend=True not passed but index decreased?' + self._first.value = index + + elif not prepend: + self._last.value = end + + self._post_init = True + return end + + except ValueError as err: + if field_map: + raise + + # should raise if diff detected + self.diff_err_fields(data) + raise err + + def diff_err_fields( + self, + data: np.ndarray, + ) -> None: + # reraise with any field discrepancy + our_fields, their_fields = ( + set(self._array.dtype.fields), + set(data.dtype.fields), + ) + + only_in_ours = our_fields - their_fields + only_in_theirs = their_fields - our_fields + + if only_in_ours: + raise TypeError( + f"Input array is missing field(s): {only_in_ours}" + ) + elif only_in_theirs: + raise TypeError( + f"Input array has unknown field(s): {only_in_theirs}" + ) + + # TODO: support "silent" prepends that don't update ._first.value? + def prepend( + self, + data: np.ndarray, + ) -> int: + end = self.push(data, prepend=True) + assert end + + def close(self) -> None: + self._first._shm.close() + self._last._shm.close() + self._shm.close() + + def destroy(self) -> None: + if _USE_POSIX: + # We manually unlink to bypass all the "resource tracker" + # nonsense meant for non-SC systems. + shm_unlink(self._shm.name) + + self._first.destroy() + self._last.destroy() + + def flush(self) -> None: + # TODO: flush to storage backend like markestore? + ... + + +def open_shm_ndarray( + size: int, + key: str | None = None, + dtype: np.dtype | None = None, + append_start_index: int | None = None, + readonly: bool = False, + +) -> ShmArray: + ''' + Open a memory shared ``numpy`` using the standard library. + + This call unlinks (aka permanently destroys) the buffer on teardown + and thus should be used from the parent-most accessor (process). + + ''' + # create new shared mem segment for which we + # have write permission + a = np.zeros(size, dtype=dtype) + a['index'] = np.arange(len(a)) + + shm = SharedMemory( + name=key, + create=True, + size=a.nbytes + ) + array = np.ndarray( + a.shape, + dtype=a.dtype, + buffer=shm.buf + ) + array[:] = a[:] + array.setflags(write=int(not readonly)) + + token = _make_token( + key=key, + size=size, + dtype=dtype, + ) + + # create single entry arrays for storing an first and last indices + first = SharedInt( + shm=SharedMemory( + name=token.shm_first_index_name, + create=True, + size=4, # std int + ) + ) + + last = SharedInt( + shm=SharedMemory( + name=token.shm_last_index_name, + create=True, + size=4, # std int + ) + ) + + # Start the "real-time" append-updated (or "pushed-to") section + # after some start index: ``append_start_index``. This allows appending + # from a start point in the array which isn't the 0 index and looks + # something like, + # ------------------------- + # | | i + # _________________________ + # <-------------> <-------> + # history real-time + # + # Once fully "prepended", the history section will leave the + # ``ShmArray._start.value: int = 0`` and the yet-to-be written + # real-time section will start at ``ShmArray.index: int``. + + # this sets the index to nearly 2/3rds into the the length of + # the buffer leaving at least a "days worth of second samples" + # for the real-time section. 
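+    # (worked example, illustrative) with `size=10_000` the default
+    # below computes `round(10_000 * 0.616) == 6160`: indices
+    # [0, 6160) are left for prepended history and [6160, 10_000)
+    # for real-time appends.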
+ if append_start_index is None: + append_start_index = round(size * 0.616) + + last.value = first.value = append_start_index + + shmarr = ShmArray( + array, + first, + last, + shm, + ) + + assert shmarr._token == token + _known_tokens[key] = shmarr.token + + # "unlink" created shm on process teardown by + # pushing teardown calls onto actor context stack + stack = tractor.current_actor().lifetime_stack + stack.callback(shmarr.close) + stack.callback(shmarr.destroy) + + return shmarr + + +def attach_shm_ndarray( + token: tuple[str, str, tuple[str, str]], + readonly: bool = True, + +) -> ShmArray: + ''' + Attach to an existing shared memory array previously + created by another process using ``open_shared_array``. + + No new shared mem is allocated but wrapper types for read/write + access are constructed. + + ''' + token = NDToken.from_msg(token) + key = token.shm_name + + if key in _known_tokens: + assert NDToken.from_msg(_known_tokens[key]) == token, "WTF" + + # XXX: ugh, looks like due to the ``shm_open()`` C api we can't + # actually place files in a subdir, see discussion here: + # https://stackoverflow.com/a/11103289 + + # attach to array buffer and view as per dtype + _err: Optional[Exception] = None + for _ in range(3): + try: + shm = SharedMemory( + name=key, + create=False, + ) + break + except OSError as oserr: + _err = oserr + time.sleep(0.1) + else: + if _err: + raise _err + + shmarr = np.ndarray( + (token.size,), + dtype=token.dtype, + buffer=shm.buf + ) + shmarr.setflags(write=int(not readonly)) + + first = SharedInt( + shm=SharedMemory( + name=token.shm_first_index_name, + create=False, + size=4, # std int + ), + ) + last = SharedInt( + shm=SharedMemory( + name=token.shm_last_index_name, + create=False, + size=4, # std int + ), + ) + + # make sure we can read + first.value + + sha = ShmArray( + shmarr, + first, + last, + shm, + ) + # read test + sha.array + + # Stash key -> token knowledge for future queries + # via `maybe_opepn_shm_array()` but only after we know + # we can attach. + if key not in _known_tokens: + _known_tokens[key] = token + + # "close" attached shm on actor teardown + tractor.current_actor().lifetime_stack.callback(sha.close) + + return sha + + +def maybe_open_shm_ndarray( + key: str, # unique identifier for segment + size: int, + dtype: np.dtype | None = None, + append_start_index: int = 0, + readonly: bool = True, + +) -> tuple[ShmArray, bool]: + ''' + Attempt to attach to a shared memory block using a "key" lookup + to registered blocks in the users overall "system" registry + (presumes you don't have the block's explicit token). + + This function is meant to solve the problem of discovering whether + a shared array token has been allocated or discovered by the actor + running in **this** process. Systems where multiple actors may seek + to access a common block can use this function to attempt to acquire + a token as discovered by the actors who have previously stored + a "key" -> ``NDToken`` map in an actor local (aka python global) + variable. + + If you know the explicit ``NDToken`` for your memory segment instead + use ``attach_shm_array``. 
+ + ''' + try: + # see if we already know this key + token = _known_tokens[key] + return ( + attach_shm_ndarray( + token=token, + readonly=readonly, + ), + False, # not newly opened + ) + except KeyError: + log.warning(f"Could not find {key} in shms cache") + if dtype: + token = _make_token( + key, + size=size, + dtype=dtype, + ) + else: + + try: + return ( + attach_shm_ndarray( + token=token, + readonly=readonly, + ), + False, + ) + except FileNotFoundError: + log.warning(f"Could not attach to shm with token {token}") + + # This actor does not know about memory + # associated with the provided "key". + # Attempt to open a block and expect + # to fail if a block has been allocated + # on the OS by someone else. + return ( + open_shm_ndarray( + key=key, + size=size, + dtype=dtype, + append_start_index=append_start_index, + readonly=readonly, + ), + True, + ) + + +class ShmList(ShareableList): + ''' + Carbon copy of ``.shared_memory.ShareableList`` with a few + enhancements: + + - readonly mode via instance var flag `._readonly: bool` + - ``.__getitem__()`` accepts ``slice`` inputs + - exposes the underlying buffer "name" as a ``.key: str`` + + ''' + def __init__( + self, + sequence: list | None = None, + *, + name: str | None = None, + readonly: bool = True + + ) -> None: + self._readonly = readonly + self._key = name + return super().__init__( + sequence=sequence, + name=name, + ) + + @property + def key(self) -> str: + return self._key + + @property + def readonly(self) -> bool: + return self._readonly + + def __setitem__( + self, + position, + value, + + ) -> None: + + # mimick ``numpy`` error + if self._readonly: + raise ValueError('assignment destination is read-only') + + return super().__setitem__(position, value) + + def __getitem__( + self, + indexish, + ) -> list: + + # NOTE: this is a non-writeable view (copy?) of the buffer + # in a new list instance. + if isinstance(indexish, slice): + return list(self)[indexish] + + return super().__getitem__(indexish) + + # TODO: should we offer a `.array` and `.push()` equivalent + # to the `ShmArray`? + # currently we have the following limitations: + # - can't write slices of input using traditional slice-assign + # syntax due to the ``ShareableList.__setitem__()`` implementation. + # - ``list(shmlist)`` returns a non-mutable copy instead of + # a writeable view which would be handier numpy-style ops. + + +def open_shm_list( + key: str, + sequence: list | None = None, + size: int = int(2 ** 10), + dtype: float | int | bool | str | bytes | None = float, + readonly: bool = True, + +) -> ShmList: + + if sequence is None: + default = { + float: 0., + int: 0, + bool: True, + str: 'doggy', + None: None, + }[dtype] + sequence = [default] * size + + shml = ShmList( + sequence=sequence, + name=key, + readonly=readonly, + ) + + # TODO, factor into a @actor_fixture acm-API? + # -[ ] also `@maybe_actor_fixture()` which inludes + # the .current_actor() convenience check? + # |_ orr can that just be in the sin-maybe-version? + # + # "close" attached shm on actor teardown + try: + actor = tractor.current_actor() + + actor.lifetime_stack.callback(shml.shm.close) + + # XXX on 3.13+ we don't need to call this? + # -> bc we pass `track=False` for `SharedMemeory` orr? 
+        if (
+            platform.python_version_tuple()[:-1] < ('3', '13')
+        ):
+            actor.lifetime_stack.callback(shml.shm.unlink)
+    except RuntimeError:
+        log.warning('tractor runtime not active, skipping teardown steps')
+
+    return shml
+
+
+def attach_shm_list(
+    key: str,
+    readonly: bool = False,
+
+) -> ShmList:
+
+    return ShmList(
+        name=key,
+        readonly=readonly,
+    )
diff --git a/tractor/ipc/_tcp.py b/tractor/ipc/_tcp.py
new file mode 100644
index 00000000..a1f511d5
--- /dev/null
+++ b/tractor/ipc/_tcp.py
@@ -0,0 +1,254 @@
+# tractor: structured concurrent "actors".
+# Copyright 2018-eternity Tyler Goodlet.
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+'''
+TCP implementation of tractor.ipc._transport.MsgTransport protocol
+
+'''
+from __future__ import annotations
+import ipaddress
+from typing import (
+    ClassVar,
+)
+# from contextlib import (
+#     asynccontextmanager as acm,
+# )
+
+import msgspec
+import trio
+from trio import (
+    SocketListener,
+    open_tcp_listeners,
+)
+
+from tractor.msg import MsgCodec
+from tractor.log import get_logger
+from tractor.ipc._transport import (
+    MsgTransport,
+    MsgpackTransport,
+)
+
+
+log = get_logger(__name__)
+
+
+class TCPAddress(
+    msgspec.Struct,
+    frozen=True,
+):
+    _host: str
+    _port: int
+
+    def __post_init__(self):
+        try:
+            ipaddress.ip_address(self._host)
+        except ValueError as valerr:
+            raise ValueError(
+                f'Invalid {type(self).__name__}._host = {self._host!r}\n'
+            ) from valerr
+
+    proto_key: ClassVar[str] = 'tcp'
+    unwrapped_type: ClassVar[type] = tuple[str, int]
+    def_bindspace: ClassVar[str] = '127.0.0.1'
+
+    # ?TODO, actually validate ipv4/6 with stdlib's `ipaddress`
+    @property
+    def is_valid(self) -> bool:
+        '''
+        Predicate to ensure a valid socket-address pair.
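+
+        E.g. (sketch) `TCPAddress('127.0.0.1', 1616).is_valid` should
+        be `True`, whereas a 0-port or a multicast host (eg.
+        '224.0.0.1') is not considered valid.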
+
+        '''
+        return (
+            self._port != 0
+            and
+            (ipaddr := ipaddress.ip_address(self._host))
+            and not (
+                ipaddr.is_reserved
+                or
+                ipaddr.is_unspecified
+                or
+                ipaddr.is_link_local
+                or
+                ipaddr.is_multicast
+                or
+                ipaddr.is_global
+            )
+        )
+        # ^XXX^ see various properties of invalid addrs here,
+        # https://docs.python.org/3/library/ipaddress.html#ipaddress.IPv4Address
+
+    @property
+    def bindspace(self) -> str:
+        return self._host
+
+    @property
+    def domain(self) -> str:
+        return self._host
+
+    @classmethod
+    def from_addr(
+        cls,
+        addr: tuple[str, int]
+    ) -> TCPAddress:
+        match addr:
+            case (str(), int()):
+                return TCPAddress(addr[0], addr[1])
+            case _:
+                raise ValueError(
+                    f'Invalid unwrapped address for {cls}\n'
+                    f'{addr}\n'
+                )
+
+    def unwrap(self) -> tuple[str, int]:
+        return (
+            self._host,
+            self._port,
+        )
+
+    @classmethod
+    def get_random(
+        cls,
+        bindspace: str = def_bindspace,
+    ) -> TCPAddress:
+        return TCPAddress(bindspace, 0)
+
+    @classmethod
+    def get_root(cls) -> TCPAddress:
+        return TCPAddress(
+            '127.0.0.1',
+            1616,
+        )
+
+    def __repr__(self) -> str:
+        return (
+            f'{type(self).__name__}[{self.unwrap()}]'
+        )
+
+    @classmethod
+    def get_transport(
+        cls,
+        codec: str = 'msgpack',
+    ) -> MsgTransport:
+        match codec:
+            case 'msgpack':
+                return MsgpackTCPStream
+            case _:
+                raise ValueError(
+                    f'No IPC transport with {codec!r} supported !'
+                )
+
+
+async def start_listener(
+    addr: TCPAddress,
+    **kwargs,
+) -> SocketListener:
+    '''
+    Start a TCP socket listener on the given `TCPAddress`.
+
+    '''
+    log.runtime(
+        f'Trying socket bind\n'
+        f'>[ {addr}\n'
+    )
+    # ?TODO, maybe we should just change the lower-level call this is
+    # using internally per-listener?
+    listeners: list[SocketListener] = await open_tcp_listeners(
+        host=addr._host,
+        port=addr._port,
+        **kwargs
+    )
+    # NOTE, for now we don't expect non-singleton-resolving
+    # domain-addresses/multi-homed-hosts.
+    # (though it is supported by `open_tcp_listeners()`)
+    assert len(listeners) == 1
+    listener = listeners[0]
+    host, port = listener.socket.getsockname()[:2]
+    bound_addr: TCPAddress = type(addr).from_addr((host, port))
+    log.info(
+        f'Listening on TCP socket\n'
+        f'[> {bound_addr}\n'
+    )
+    return listener
+
+
+# TODO: typing oddity.. not sure why we have to inherit here, but it
+# seems to be an issue with `get_msg_transport()` returning
+# a `Type[Protocol]`; probably should make a `mypy` issue?
+class MsgpackTCPStream(MsgpackTransport):
+    '''
+    A ``trio.SocketStream`` delivering ``msgpack`` formatted data
+    using the ``msgspec`` codec lib.
+
+    '''
+    address_type = TCPAddress
+    layer_key: int = 4
+
+    @property
+    def maddr(self) -> str:
+        host, port = self.raddr.unwrap()
+        return (
+            # TODO, use `ipaddress` from stdlib to handle
+            # first detecting which of `ipv4/6` before
+            # choosing the routing prefix part.
+            f'/ipv4/{host}'
+
+            f'/{self.address_type.proto_key}/{port}'
+            # f'/{self.chan.uid[0]}'
+            # f'/{self.cid}'
+
+            # f'/cid={cid_head}..{cid_tail}'
+            # TODO: ? not use this ^ right ?
+ ) + + def connected(self) -> bool: + return self.stream.socket.fileno() != -1 + + @classmethod + async def connect_to( + cls, + destaddr: TCPAddress, + prefix_size: int = 4, + codec: MsgCodec|None = None, + **kwargs + ) -> MsgpackTCPStream: + stream = await trio.open_tcp_stream( + *destaddr.unwrap(), + **kwargs + ) + return MsgpackTCPStream( + stream, + prefix_size=prefix_size, + codec=codec + ) + + @classmethod + def get_stream_addrs( + cls, + stream: trio.SocketStream + ) -> tuple[ + TCPAddress, + TCPAddress, + ]: + # TODO, what types are these? + lsockname = stream.socket.getsockname() + l_sockaddr: tuple[str, int] = tuple(lsockname[:2]) + rsockname = stream.socket.getpeername() + r_sockaddr: tuple[str, int] = tuple(rsockname[:2]) + return ( + TCPAddress.from_addr(l_sockaddr), + TCPAddress.from_addr(r_sockaddr), + ) diff --git a/tractor/ipc/_transport.py b/tractor/ipc/_transport.py new file mode 100644 index 00000000..8c76c8ad --- /dev/null +++ b/tractor/ipc/_transport.py @@ -0,0 +1,536 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +''' +typing.Protocol based generic msg API, implement this class to add +backends for tractor.ipc.Channel + +''' +from __future__ import annotations +from typing import ( + runtime_checkable, + Type, + Protocol, + # TypeVar, + ClassVar, + TYPE_CHECKING, +) +from collections.abc import ( + AsyncGenerator, + AsyncIterator, +) +import struct + +import trio +import msgspec +from tricycle import BufferedReceiveStream + +from tractor.log import get_logger +from tractor._exceptions import ( + MsgTypeError, + TransportClosed, + _mk_send_mte, + _mk_recv_mte, +) +from tractor.msg import ( + _ctxvar_MsgCodec, + # _codec, XXX see `self._codec` sanity/debug checks + MsgCodec, + MsgType, + types as msgtypes, + pretty_struct, +) + +if TYPE_CHECKING: + from tractor._addr import Address + +log = get_logger(__name__) + + +# (codec, transport) +MsgTransportKey = tuple[str, str] + + +# from tractor.msg.types import MsgType +# ?TODO? this should be our `Union[*msgtypes.__spec__]` alias now right..? +# => BLEH, except can't bc prots must inherit typevar or param-spec +# vars.. +# MsgType = TypeVar('MsgType') + + +@runtime_checkable +class MsgTransport(Protocol): +# +# class MsgTransport(Protocol[MsgType]): +# ^-TODO-^ consider using a generic def and indexing with our +# eventual msg definition/types? +# - https://docs.python.org/3/library/typing.html#typing.Protocol + + stream: trio.SocketStream + drained: list[MsgType] + + address_type: ClassVar[Type[Address]] + codec_key: ClassVar[str] + + # XXX: should this instead be called `.sendall()`? + async def send(self, msg: MsgType) -> None: + ... + + async def recv(self) -> MsgType: + ... + + def __aiter__(self) -> MsgType: + ... + + def connected(self) -> bool: + ... 
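+
+    # NOTE (sketch): since this protocol is `@runtime_checkable`,
+    # concrete transports (eg. the `MsgpackTransport` subtypes below)
+    # can be checked for (attribute-presence only) compliance via
+    # `isinstance(obj, MsgTransport)`.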
+ + # defining this sync otherwise it causes a mypy error because it + # can't figure out it's a generator i guess?..? + def drain(self) -> AsyncIterator[dict]: + ... + + @classmethod + def key(cls) -> MsgTransportKey: + return ( + cls.codec_key, + cls.address_type.proto_key, + ) + + @property + def laddr(self) -> Address: + ... + + @property + def raddr(self) -> Address: + ... + + @property + def maddr(self) -> str: + ... + + @classmethod + async def connect_to( + cls, + addr: Address, + **kwargs + ) -> MsgTransport: + ... + + @classmethod + def get_stream_addrs( + cls, + stream: trio.abc.Stream + ) -> tuple[ + Address, # local + Address # remote + ]: + ''' + Return the transport protocol's address pair for the local + and remote-peer side. + + ''' + ... + + # TODO, such that all `.raddr`s for each `SocketStream` are + # delivered? + # -[ ] move `.open_listener()` here and internally track the + # listener set, per address? + # def get_peers( + # self, + # ) -> list[Address]: + # ... + + + +class MsgpackTransport(MsgTransport): + + # TODO: better naming for this? + # -[ ] check how libp2p does naming for such things? + codec_key: str = 'msgpack' + + def __init__( + self, + stream: trio.abc.Stream, + prefix_size: int = 4, + + # XXX optionally provided codec pair for `msgspec`: + # https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types + # + # TODO: define this as a `Codec` struct which can be + # overriden dynamically by the application/runtime? + codec: MsgCodec = None, + + ) -> None: + self.stream = stream + ( + self._laddr, + self._raddr, + ) = self.get_stream_addrs(stream) + + # create read loop instance + self._aiter_pkts = self._iter_packets() + self._send_lock = trio.StrictFIFOLock() + + # public i guess? + self.drained: list[dict] = [] + + self.recv_stream = BufferedReceiveStream( + transport_stream=stream + ) + self.prefix_size = prefix_size + + # allow for custom IPC msg interchange format + # dynamic override Bo + self._task = trio.lowlevel.current_task() + + # XXX for ctxvar debug only! + # self._codec: MsgCodec = ( + # codec + # or + # _codec._ctxvar_MsgCodec.get() + # ) + + async def _iter_packets(self) -> AsyncGenerator[dict, None]: + ''' + Yield `bytes`-blob decoded packets from the underlying TCP + stream using the current task's `MsgCodec`. + + This is a streaming routine implemented as an async generator + func (which was the original design, but could be changed?) + and is allocated by a `.__call__()` inside `.__init__()` where + it is assigned to the `._aiter_pkts` attr. + + ''' + decodes_failed: int = 0 + + tpt_name: str = f'{type(self).__name__!r}' + while True: + try: + header: bytes = await self.recv_stream.receive_exactly(4) + except ( + ValueError, + ConnectionResetError, + + # not sure entirely why we need this but without it we + # seem to be getting racy failures here on + # arbiter/registry name subs.. + trio.BrokenResourceError, + + ) as trans_err: + + loglevel = 'transport' + match trans_err: + # case ( + # ConnectionResetError() + # ): + # loglevel = 'transport' + + # peer actor (graceful??) TCP EOF but `tricycle` + # seems to raise a 0-bytes-read? + case ValueError() if ( + 'unclean EOF' in trans_err.args[0] + ): + pass + + # peer actor (task) prolly shutdown quickly due + # to cancellation + case trio.BrokenResourceError() if ( + 'Connection reset by peer' in trans_err.args[0] + ): + pass + + # unless the disconnect condition falls under "a + # normal operation breakage" we usualy console warn + # about it. 
+                    case _:
+                        loglevel: str = 'warning'
+
+
+                raise TransportClosed(
+                    message=(
+                        f'{tpt_name} already closed by peer\n'
+                    ),
+                    src_exc=trans_err,
+                    loglevel=loglevel,
+                ) from trans_err
+
+            # XXX definitely can happen if transport is closed
+            # manually by another `trio.lowlevel.Task` in the
+            # same actor; we use this in some simulated fault
+            # testing for ex, but generally should never happen
+            # under normal operation!
+            #
+            # NOTE: as such we always re-raise this error from the
+            #       RPC msg loop!
+            except trio.ClosedResourceError as cre:
+                closure_err = cre
+
+                raise TransportClosed(
+                    message=(
+                        f'{tpt_name} was already closed locally ?\n'
+                    ),
+                    src_exc=closure_err,
+                    loglevel='error',
+                    raise_on_report=(
+                        'another task closed this fd' in closure_err.args
+                    ),
+                ) from closure_err
+
+            # graceful TCP EOF disconnect
+            if header == b'':
+                raise TransportClosed(
+                    message=(
+                        f'{tpt_name} already gracefully closed\n'
+                    ),
+                    loglevel='transport',
+                )
+
+            size: int
+            size, = struct.unpack("<I", header)
+
+            log.transport(f'received header {size}')
+            msg_bytes: bytes = await self.recv_stream.receive_exactly(size)
+
+            log.transport(f"received {msg_bytes}")
+            try:
+                # NOTE: lookup the `trio.Task.context`'s var for
+                # the current `MsgCodec`.
+                codec: MsgCodec = _ctxvar_MsgCodec.get()
+                yield codec.decode(msg_bytes)
+
+            except (
+                msgspec.DecodeError,
+                UnicodeDecodeError,
+            ):
+                if decodes_failed < 4:
+                    # ignore decoding errors for now and assume they
+                    # have to do with a channel drop - hope that
+                    # receiving from the channel will raise an expected
+                    # error and bubble up.
+                    try:
+                        msg_str: str|bytes = msg_bytes.decode()
+                    except UnicodeDecodeError:
+                        msg_str = msg_bytes
+
+                    log.exception(
+                        f'Failed to decode msg?\n'
+                        f'{codec}\n'
+                        f'\n'
+                        f'Rxed bytes from wire:\n'
+                        f'{msg_str!r}\n'
+                    )
+                    decodes_failed += 1
+                else:
+                    raise
+
+    async def send(
+        self,
+        msg: msgtypes.MsgType,
+
+        strict_types: bool = True,
+        hide_tb: bool = False,
+
+    ) -> None:
+        '''
+        Send a msgpack encoded py-object-blob-as-msg over TCP.
+
+        If `strict_types == True` then a `MsgTypeError` will be raised on any
+        invalid msg type
+
+        '''
+        __tracebackhide__: bool = hide_tb
+
+        # XXX see `trio._sync.AsyncContextManagerMixin` for details
+        # on the `.acquire()`/`.release()` sequencing..
+        async with self._send_lock:
+
+            # NOTE: lookup the `trio.Task.context`'s var for
+            # the current `MsgCodec`.
+            codec: MsgCodec = _ctxvar_MsgCodec.get()
+
+            # XXX for ctxvar debug only!
+            # if self._codec.pld_spec != codec.pld_spec:
+            #     self._codec = codec
+            #     log.runtime(
+            #         f'Using new codec in {self}.send()\n'
+            #         f'codec: {self._codec}\n\n'
+            #         f'msg: {msg}\n'
+            #     )
+
+            if type(msg) not in msgtypes.__msg_types__:
+                if strict_types:
+                    raise _mk_send_mte(
+                        msg,
+                        codec=codec,
+                    )
+                else:
+                    log.warning(
+                        'Sending non-`Msg`-spec msg?\n\n'
+                        f'{msg}\n'
+                    )
+
+            try:
+                bytes_data: bytes = codec.encode(msg)
+            except TypeError as _err:
+                typerr = _err
+                msgtyperr: MsgTypeError = _mk_send_mte(
+                    msg,
+                    codec=codec,
+                    message=(
+                        f'IPC-msg-spec violation in\n\n'
+                        f'{pretty_struct.Struct.pformat(msg)}'
+                    ),
+                    src_type_error=typerr,
+                )
+                raise msgtyperr from typerr
+
+            # supposedly the fastest says,
+            # https://stackoverflow.com/a/54027962
+            size: bytes = struct.pack("<I", len(bytes_data))
+
+            return await self.stream.send_all(size + bytes_data)
+
+        # try:
+        #     <the-above-encode-and-send-block>
+        # except BaseException as _err:
+        #     err = _err
+        #     if not isinstance(err, MsgTypeError):
+        #         __tracebackhide__: bool = False
+        #         raise
+
+    async def recv(self) -> msgtypes.MsgType:
+        return await self._aiter_pkts.asend(None)
+
+    async def drain(self) -> AsyncIterator[dict]:
+        '''
+        Drain the stream's remaining messages sent from
+        the far end until the connection is closed by
+        the peer.
+
+        '''
+        try:
+            async for msg in self._iter_packets():
+                self.drained.append(msg)
+        except TransportClosed:
+            for msg in self.drained:
+                yield msg
+
+    def __aiter__(self):
+        return self._aiter_pkts
+
+    @property
+    def laddr(self) -> Address:
+        return self._laddr
+
+    @property
+    def raddr(self) -> Address:
+        return self._raddr
+
+    def pformat(self) -> str:
+        return (
+            f'<{type(self).__name__}(\n'
+            f' |_peers: 1\n'
+            f'   laddr: {self._laddr}\n'
+            f'   raddr: {self._raddr}\n'
+            # f'\n'
+            f' |_task: {self._task}\n'
+            f')>\n'
+        )
+
+    __repr__ = __str__ = pformat
diff --git a/tractor/ipc/_types.py b/tractor/ipc/_types.py
new file mode 100644
index 00000000..59653b17
--- /dev/null
+++ b/tractor/ipc/_types.py
@@ -0,0 +1,123 @@
+# tractor: structured concurrent "actors".
+# Copyright 2018-eternity Tyler Goodlet.
+ +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +''' +IPC subsys type-lookup helpers? + +''' +from typing import ( + Type, + # TYPE_CHECKING, +) + +import trio +import socket + +from tractor.ipc._transport import ( + MsgTransportKey, + MsgTransport +) +from tractor.ipc._tcp import ( + TCPAddress, + MsgpackTCPStream, +) +from tractor.ipc._uds import ( + UDSAddress, + MsgpackUDSStream, +) + +# if TYPE_CHECKING: +# from tractor._addr import Address + + +Address = TCPAddress|UDSAddress + +# manually updated list of all supported msg transport types +_msg_transports = [ + MsgpackTCPStream, + MsgpackUDSStream +] + + +# convert a MsgTransportKey to the corresponding transport type +_key_to_transport: dict[ + MsgTransportKey, + Type[MsgTransport], +] = { + ('msgpack', 'tcp'): MsgpackTCPStream, + ('msgpack', 'uds'): MsgpackUDSStream, +} + +# convert an Address wrapper to its corresponding transport type +_addr_to_transport: dict[ + Type[TCPAddress|UDSAddress], + Type[MsgTransport] +] = { + TCPAddress: MsgpackTCPStream, + UDSAddress: MsgpackUDSStream, +} + + +def transport_from_addr( + addr: Address, + codec_key: str = 'msgpack', +) -> Type[MsgTransport]: + ''' + Given a destination address and a desired codec, find the + corresponding `MsgTransport` type. + + ''' + try: + return _addr_to_transport[type(addr)] + + except KeyError: + raise NotImplementedError( + f'No known transport for address {repr(addr)}' + ) + + +def transport_from_stream( + stream: trio.abc.Stream, + codec_key: str = 'msgpack' +) -> Type[MsgTransport]: + ''' + Given an arbitrary `trio.abc.Stream` and a desired codec, + find the corresponding `MsgTransport` type. + + ''' + transport = None + if isinstance(stream, trio.SocketStream): + sock: socket.socket = stream.socket + match sock.family: + case socket.AF_INET | socket.AF_INET6: + transport = 'tcp' + + case socket.AF_UNIX: + transport = 'uds' + + case _: + raise NotImplementedError( + f'Unsupported socket family: {sock.family}' + ) + + if not transport: + raise NotImplementedError( + f'Could not figure out transport type for stream type {type(stream)}' + ) + + key = (codec_key, transport) + + return _key_to_transport[key] diff --git a/tractor/ipc/_uds.py b/tractor/ipc/_uds.py new file mode 100644 index 00000000..e23fd8d2 --- /dev/null +++ b/tractor/ipc/_uds.py @@ -0,0 +1,458 @@ +# tractor: structured concurrent "actors". +# Copyright 2018-eternity Tyler Goodlet. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. 
+ +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +''' +Unix Domain Socket implementation of tractor.ipc._transport.MsgTransport protocol + +''' +from __future__ import annotations +from contextlib import ( + contextmanager as cm, +) +from pathlib import Path +import os +from socket import ( + AF_UNIX, + SOCK_STREAM, + SO_PASSCRED, + SO_PEERCRED, + SOL_SOCKET, +) +import struct +from typing import ( + Type, + TYPE_CHECKING, + ClassVar, +) + +import msgspec +import trio +from trio import ( + socket, + SocketListener, +) +from trio._highlevel_open_unix_stream import ( + close_on_error, + has_unix, +) + +from tractor.msg import MsgCodec +from tractor.log import get_logger +from tractor.ipc._transport import ( + MsgpackTransport, +) +from .._state import ( + get_rt_dir, + current_actor, + is_root_process, +) + +if TYPE_CHECKING: + from ._runtime import Actor + + +log = get_logger(__name__) + + +def unwrap_sockpath( + sockpath: Path, +) -> tuple[Path, Path]: + return ( + sockpath.parent, + sockpath.name, + ) + + +class UDSAddress( + msgspec.Struct, + frozen=True, +): + filedir: str|Path|None + filename: str|Path + maybe_pid: int|None = None + + # TODO, maybe we should use better field and value + # -[x] really this is a `.protocol_key` not a "name" of anything. + # -[ ] consider a 'unix' proto-key instead? + # -[ ] need to check what other mult-transport frameworks do + # like zmq, nng, uri-spec et al! + proto_key: ClassVar[str] = 'uds' + unwrapped_type: ClassVar[type] = tuple[str, int] + def_bindspace: ClassVar[Path] = get_rt_dir() + + @property + def bindspace(self) -> Path: + ''' + We replicate the "ip-set-of-hosts" part of a UDS socket as + just the sub-directory in which we allocate socket files. + + ''' + return ( + self.filedir + or + self.def_bindspace + ) + + @property + def sockpath(self) -> Path: + return self.bindspace / self.filename + + @property + def is_valid(self) -> bool: + ''' + We block socket files not allocated under the runtime subdir. + + ''' + return self.bindspace in self.sockpath.parents + + @classmethod + def from_addr( + cls, + addr: ( + tuple[Path|str, Path|str]|Path|str + ), + ) -> UDSAddress: + match addr: + case tuple()|list(): + filedir = Path(addr[0]) + filename = Path(addr[1]) + return UDSAddress( + filedir=filedir, + filename=filename, + # maybe_pid=pid, + ) + # NOTE, in case we ever decide to just `.unwrap()` + # to a `Path|str`? 
+ case str()|Path(): + sockpath: Path = Path(addr) + return UDSAddress(*unwrap_sockpath(sockpath)) + case _: + # import pdbp; pdbp.set_trace() + raise TypeError( + f'Bad unwrapped-address for {cls} !\n' + f'{addr!r}\n' + ) + + def unwrap(self) -> tuple[str, int]: + # XXX NOTE, since this gets passed DIRECTLY to + # `.ipc._uds.open_unix_socket_w_passcred()` + return ( + str(self.filedir), + str(self.filename), + ) + + @classmethod + def get_random( + cls, + bindspace: Path|None = None, # default netns + ) -> UDSAddress: + + filedir: Path = bindspace or cls.def_bindspace + pid: int = os.getpid() + actor: Actor|None = current_actor( + err_on_no_runtime=False, + ) + if actor: + sockname: str = '::'.join(actor.uid) + f'@{pid}' + else: + prefix: str = '' + if is_root_process(): + prefix: str = 'root' + sockname: str = f'{prefix}@{pid}' + + sockpath: Path = Path(f'{sockname}.sock') + return UDSAddress( + filedir=filedir, + filename=sockpath, + maybe_pid=pid, + ) + + @classmethod + def get_root(cls) -> UDSAddress: + def_uds_filename: Path = 'registry@1616.sock' + return UDSAddress( + filedir=cls.def_bindspace, + filename=def_uds_filename, + # maybe_pid=1616, + ) + + # ?TODO, maybe we should just our .msg.pretty_struct.Struct` for + # this instead? + # -[ ] is it too "multi-line"y tho? + # the compact tuple/.unwrapped() form is simple enough? + # + def __repr__(self) -> str: + if not (pid := self.maybe_pid): + pid: str = '' + + body: str = ( + f'({self.filedir}, {self.filename}, {pid})' + ) + return ( + f'{type(self).__name__}' + f'[' + f'{body}' + f']' + ) + +@cm +def _reraise_as_connerr( + src_excs: tuple[Type[Exception]], + addr: UDSAddress, +): + try: + yield + except src_excs as src_exc: + raise ConnectionError( + f'Bad UDS socket-filepath-as-address ??\n' + f'{addr}\n' + f' |_sockpath: {addr.sockpath}\n' + f'\n' + f'from src: {src_exc!r}\n' + ) from src_exc + + +async def start_listener( + addr: UDSAddress, + **kwargs, +) -> SocketListener: + ''' + Start listening for inbound connections via + a `trio.SocketListener` (task) which `socket.bind()`s on `addr`. + + Note, if the `UDSAddress.bindspace: Path` directory dne it is + implicitly created. + + ''' + sock = socket.socket( + socket.AF_UNIX, + socket.SOCK_STREAM + ) + log.info( + f'Attempting to bind UDS socket\n' + f'>[\n' + f'|_{addr}\n' + ) + + # ?TODO? should we use the `actor.lifetime_stack` + # to rm on shutdown? + bindpath: Path = addr.sockpath + if not (bs := addr.bindspace).is_dir(): + log.info( + 'Creating bindspace dir in file-sys\n' + f'>{{\n' + f'|_{bs!r}\n' + ) + bs.mkdir() + + with _reraise_as_connerr( + src_excs=( + FileNotFoundError, + OSError, + ), + addr=addr + ): + await sock.bind(str(bindpath)) + + sock.listen(1) + log.info( + f'Listening on UDS socket\n' + f'[>\n' + f' |_{addr}\n' + ) + return SocketListener(sock) + + +def close_listener( + addr: UDSAddress, + lstnr: SocketListener, +) -> None: + ''' + Close and remove the listening unix socket's path. + + ''' + lstnr.socket.close() + os.unlink(addr.sockpath) + + +async def open_unix_socket_w_passcred( + filename: str|bytes|os.PathLike[str]|os.PathLike[bytes], +) -> trio.SocketStream: + ''' + Literally the exact same as `trio.open_unix_socket()` except we set the additiona + `socket.SO_PASSCRED` option to ensure the server side (the process calling `accept()`) + can extract the connecting peer's credentials, namely OS specific process + related IDs. 
+ + See this SO for "why" the extra opts, + - https://stackoverflow.com/a/7982749 + + ''' + if not has_unix: + raise RuntimeError("Unix sockets are not supported on this platform") + + # much more simplified logic vs tcp sockets - one socket type and only one + # possible location to connect to + sock = trio.socket.socket(AF_UNIX, SOCK_STREAM) + sock.setsockopt(SOL_SOCKET, SO_PASSCRED, 1) + with close_on_error(sock): + await sock.connect(os.fspath(filename)) + + return trio.SocketStream(sock) + + +def get_peer_info(sock: trio.socket.socket) -> tuple[ + int, # pid + int, # uid + int, # guid +]: + ''' + Deliver the connecting peer's "credentials"-info as defined in + a very Linux specific way.. + + For more deats see, + - `man accept`, + - `man unix`, + + this great online guide to all things sockets, + - https://beej.us/guide/bgnet/html/split-wide/man-pages.html#setsockoptman + + AND this **wonderful SO answer** + - https://stackoverflow.com/a/7982749 + + ''' + creds: bytes = sock.getsockopt( + SOL_SOCKET, + SO_PEERCRED, + struct.calcsize('3i') + ) + # i.e a tuple of the fields, + # pid: int, "process" + # uid: int, "user" + # gid: int, "group" + return struct.unpack('3i', creds) + + +class MsgpackUDSStream(MsgpackTransport): + ''' + A `trio.SocketStream` around a Unix-Domain-Socket transport + delivering `msgpack` encoded msgs using the `msgspec` codec lib. + + ''' + address_type = UDSAddress + layer_key: int = 4 + + @property + def maddr(self) -> str: + if not self.raddr: + return '' + + filepath: Path = Path(self.raddr.unwrap()[0]) + return ( + f'/{self.address_type.proto_key}/{filepath}' + # f'/{self.chan.uid[0]}' + # f'/{self.cid}' + + # f'/cid={cid_head}..{cid_tail}' + # TODO: ? not use this ^ right ? + ) + + def connected(self) -> bool: + return self.stream.socket.fileno() != -1 + + @classmethod + async def connect_to( + cls, + addr: UDSAddress, + prefix_size: int = 4, + codec: MsgCodec|None = None, + **kwargs + ) -> MsgpackUDSStream: + + + sockpath: Path = addr.sockpath + # + # ^XXX NOTE, we don't provide any out-of-band `.pid` info + # (like, over the socket as extra msgs) since the (augmented) + # `.setsockopt()` call tells the OS provide it; the client + # pid can then be read on server/listen() side via + # `get_peer_info()` above. + + with _reraise_as_connerr( + src_excs=( + FileNotFoundError, + ), + addr=addr + ): + stream = await open_unix_socket_w_passcred( + str(sockpath), + **kwargs + ) + + tpt_stream = MsgpackUDSStream( + stream, + prefix_size=prefix_size, + codec=codec + ) + # XXX assign from new addrs after peer-PID extract! + ( + tpt_stream._laddr, + tpt_stream._raddr, + ) = cls.get_stream_addrs(stream) + + return tpt_stream + + @classmethod + def get_stream_addrs( + cls, + stream: trio.SocketStream + ) -> tuple[ + Path, + int, + ]: + sock: trio.socket.socket = stream.socket + + # NOTE XXX, it's unclear why one or the other ends up being + # `bytes` versus the socket-file-path, i presume it's + # something to do with who is the server (called `.listen()`)? 
+ # maybe could be better implemented using another info-query + # on the socket like, + # https://beej.us/guide/bgnet/html/split-wide/system-calls-or-bust.html#gethostnamewho-am-i + sockname: str|bytes = sock.getsockname() + # https://beej.us/guide/bgnet/html/split-wide/system-calls-or-bust.html#getpeernamewho-are-you + peername: str|bytes = sock.getpeername() + match (peername, sockname): + case (str(), bytes()): + sock_path: Path = Path(peername) + case (bytes(), str()): + sock_path: Path = Path(sockname) + ( + peer_pid, + _, + _, + ) = get_peer_info(sock) + + filedir, filename = unwrap_sockpath(sock_path) + laddr = UDSAddress( + filedir=filedir, + filename=filename, + maybe_pid=os.getpid(), + ) + raddr = UDSAddress( + filedir=filedir, + filename=filename, + maybe_pid=peer_pid + ) + return (laddr, raddr) diff --git a/tractor/log.py b/tractor/log.py index 74e0321b..329562b1 100644 --- a/tractor/log.py +++ b/tractor/log.py @@ -81,10 +81,35 @@ BOLD_PALETTE = { } +def at_least_level( + log: Logger|LoggerAdapter, + level: int|str, +) -> bool: + ''' + Predicate to test if a given level is active. + + ''' + if isinstance(level, str): + level: int = CUSTOM_LEVELS[level.upper()] + + if log.getEffectiveLevel() <= level: + return True + return False + + # TODO: this isn't showing the correct '{filename}' # as it did before.. class StackLevelAdapter(LoggerAdapter): + def at_least_level( + self, + level: str, + ) -> bool: + return at_least_level( + log=self, + level=level, + ) + def transport( self, msg: str, @@ -92,7 +117,7 @@ class StackLevelAdapter(LoggerAdapter): ) -> None: ''' IPC transport level msg IO; generally anything below - `._ipc.Channel` and friends. + `.ipc.Channel` and friends. ''' return self.log(5, msg) @@ -270,7 +295,9 @@ def get_logger( subsys_spec: str|None = None, ) -> StackLevelAdapter: - '''Return the package log or a sub-logger for ``name`` if provided. + ''' + Return the `tractor`-library root logger or a sub-logger for + `name` if provided. ''' log: Logger @@ -282,10 +309,10 @@ def get_logger( name != _proj_name ): - # NOTE: for handling for modules that use ``get_logger(__name__)`` + # NOTE: for handling for modules that use `get_logger(__name__)` # we make the following stylistic choice: # - always avoid duplicate project-package token - # in msg output: i.e. tractor.tractor _ipc.py in header + # in msg output: i.e. tractor.tractor.ipc._chan.py in header # looks ridiculous XD # - never show the leaf module name in the {name} part # since in python the {filename} is always this same @@ -331,7 +358,7 @@ def get_logger( def get_console_log( level: str|None = None, - logger: Logger|None = None, + logger: Logger|StackLevelAdapter|None = None, **kwargs, ) -> LoggerAdapter: @@ -344,12 +371,23 @@ def get_console_log( Yeah yeah, i know we can use `logging.config.dictConfig()`. You do it. ''' - log = get_logger( - logger=logger, - **kwargs - ) # set a root logger - logger: Logger = log.logger + # get/create a stack-aware-adapter + if ( + logger + and + isinstance(logger, StackLevelAdapter) + ): + # XXX, for ex. when passed in by a caller wrapping some + # other lib's logger instance with our level-adapter. + log = logger + else: + log: StackLevelAdapter = get_logger( + logger=logger, + **kwargs + ) + + logger: Logger|StackLevelAdapter = log.logger if not level: return log @@ -367,10 +405,7 @@ def get_console_log( None, ) ): - fmt = LOG_FORMAT - # if logger: - # fmt = None - + fmt: str = LOG_FORMAT # always apply our format? 
handler = StreamHandler() formatter = colorlog.ColoredFormatter( fmt=fmt, @@ -391,19 +426,3 @@ def get_loglevel() -> str: # global module logger for tractor itself log: StackLevelAdapter = get_logger('tractor') - - -def at_least_level( - log: Logger|LoggerAdapter, - level: int|str, -) -> bool: - ''' - Predicate to test if a given level is active. - - ''' - if isinstance(level, str): - level: int = CUSTOM_LEVELS[level.upper()] - - if log.getEffectiveLevel() <= level: - return True - return False diff --git a/tractor/msg/_ops.py b/tractor/msg/_ops.py index fbbbecff..1dad63c8 100644 --- a/tractor/msg/_ops.py +++ b/tractor/msg/_ops.py @@ -210,12 +210,14 @@ class PldRx(Struct): match msg: case Return()|Error(): log.runtime( - f'Rxed final outcome msg\n' + f'Rxed final-outcome msg\n' + f'\n' f'{msg}\n' ) case Stop(): log.runtime( f'Rxed stream stopped msg\n' + f'\n' f'{msg}\n' ) if passthrough_non_pld_msgs: @@ -261,8 +263,9 @@ class PldRx(Struct): if ( type(msg) is Return ): - log.info( + log.runtime( f'Rxed final result msg\n' + f'\n' f'{msg}\n' ) return self.decode_pld( @@ -304,10 +307,13 @@ class PldRx(Struct): try: pld: PayloadT = self._pld_dec.decode(pld) log.runtime( - 'Decoded msg payload\n\n' + f'Decoded payload for\n' + # f'\n' f'{msg}\n' - f'where payload decoded as\n' - f'|_pld={pld!r}\n' + # ^TODO?, ideally just render with `, + # pld={decode}` in the `msg.pformat()`?? + f'where, ' + f'{type(msg).__name__}.pld={pld!r}\n' ) return pld except TypeError as typerr: @@ -494,7 +500,8 @@ def limit_plds( finally: log.runtime( - 'Reverted to previous payload-decoder\n\n' + f'Reverted to previous payload-decoder\n' + f'\n' f'{orig_pldec}\n' ) # sanity on orig settings @@ -608,7 +615,7 @@ async def drain_to_final_msg( # # -[ ] make sure pause points work here for REPLing # the runtime itself; i.e. ensure there's no hangs! - # |_from tractor.devx._debug import pause + # |_from tractor.devx.debug import pause # await pause() # NOTE: we get here if the far end was @@ -629,7 +636,8 @@ async def drain_to_final_msg( (local_cs := rent_n.cancel_scope).cancel_called ): log.cancel( - 'RPC-ctx cancelled by local-parent scope during drain!\n\n' + f'RPC-ctx cancelled by local-parent scope during drain!\n' + f'\n' f'c}}>\n' f' |_{rent_n}\n' f' |_.cancel_scope = {local_cs}\n' @@ -663,7 +671,8 @@ async def drain_to_final_msg( # final result arrived! 
case Return(): log.runtime( - 'Context delivered final draining msg:\n' + f'Context delivered final draining msg\n' + f'\n' f'{pretty_struct.pformat(msg)}' ) ctx._result: Any = pld @@ -697,12 +706,14 @@ async def drain_to_final_msg( ): log.cancel( 'Cancelling `MsgStream` drain since ' - f'{reason}\n\n' + f'{reason}\n' + f'\n' f'<= {ctx.chan.uid}\n' - f' |_{ctx._nsf}()\n\n' + f' |_{ctx._nsf}()\n' + f'\n' f'=> {ctx._task}\n' - f' |_{ctx._stream}\n\n' - + f' |_{ctx._stream}\n' + f'\n' f'{pretty_struct.pformat(msg)}\n' ) break @@ -739,7 +750,8 @@ async def drain_to_final_msg( case Stop(): pre_result_drained.append(msg) log.runtime( # normal/expected shutdown transaction - 'Remote stream terminated due to "stop" msg:\n\n' + f'Remote stream terminated due to "stop" msg\n' + f'\n' f'{pretty_struct.pformat(msg)}\n' ) continue @@ -814,7 +826,8 @@ async def drain_to_final_msg( else: log.cancel( - 'Skipping `MsgStream` drain since final outcome is set\n\n' + f'Skipping `MsgStream` drain since final outcome is set\n' + f'\n' f'{ctx.outcome}\n' ) diff --git a/tractor/msg/pretty_struct.py b/tractor/msg/pretty_struct.py index 91eba8bd..169cb461 100644 --- a/tractor/msg/pretty_struct.py +++ b/tractor/msg/pretty_struct.py @@ -20,6 +20,7 @@ Prettified version of `msgspec.Struct` for easier console grokin. ''' from __future__ import annotations from collections import UserList +import textwrap from typing import ( Any, Iterator, @@ -105,27 +106,11 @@ def iter_fields(struct: Struct) -> Iterator[ ) -def pformat( +def iter_struct_ppfmt_lines( struct: Struct, - field_indent: int = 2, - indent: int = 0, + field_indent: int = 0, +) -> Iterator[tuple[str, str]]: -) -> str: - ''' - Recursion-safe `pprint.pformat()` style formatting of - a `msgspec.Struct` for sane reading by a human using a REPL. - - ''' - # global whitespace indent - ws: str = ' '*indent - - # field whitespace indent - field_ws: str = ' '*(field_indent + indent) - - # qtn: str = ws + struct.__class__.__qualname__ - qtn: str = struct.__class__.__qualname__ - - obj_str: str = '' # accumulator fi: structs.FieldInfo k: str v: Any @@ -135,15 +120,18 @@ def pformat( # ..]` over .__name__ == `Literal` but still get only the # latter for simple types like `str | int | None` etc..? ft: type = fi.type - typ_name: str = getattr(ft, '__name__', str(ft)) + typ_name: str = getattr( + ft, + '__name__', + str(ft) + ).replace(' ', '') # recurse to get sub-struct's `.pformat()` output Bo if isinstance(v, Struct): - val_str: str = v.pformat( - indent=field_indent + indent, - field_indent=indent + field_indent, + yield from iter_struct_ppfmt_lines( + struct=v, + field_indent=field_indent+field_indent, ) - else: val_str: str = repr(v) @@ -161,8 +149,39 @@ def pformat( # raise # return _Struct.__repr__(struct) - # TODO: LOLOL use `textwrap.indent()` instead dawwwwwg! - obj_str += (field_ws + f'{k}: {typ_name} = {val_str},\n') + yield ( + ' '*field_indent, # indented ws prefix + f'{k}: {typ_name} = {val_str},', # field's repr line content + ) + + +def pformat( + struct: Struct, + field_indent: int = 2, + indent: int = 0, +) -> str: + ''' + Recursion-safe `pprint.pformat()` style formatting of + a `msgspec.Struct` for sane reading by a human using a REPL. 
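+
+    E.g. (roughly) a struct with fields `a: int = 1` and `b: str = 'x'`
+    renders as::
+
+        MyStruct(
+          a: int = 1,
+          b: str = 'x',
+        )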
+ + ''' + obj_str: str = '' # accumulator + for prefix, field_repr, in iter_struct_ppfmt_lines( + struct, + field_indent=field_indent, + ): + obj_str += f'{prefix}{field_repr}\n' + + # global whitespace indent + ws: str = ' '*indent + if indent: + obj_str: str = textwrap.indent( + text=obj_str, + prefix=ws, + ) + + # qtn: str = ws + struct.__class__.__qualname__ + qtn: str = struct.__class__.__qualname__ return ( f'{qtn}(\n' diff --git a/tractor/msg/types.py b/tractor/msg/types.py index 1cc8b78e..17d99449 100644 --- a/tractor/msg/types.py +++ b/tractor/msg/types.py @@ -31,6 +31,7 @@ from typing import ( Type, TypeVar, TypeAlias, + # TYPE_CHECKING, Union, ) @@ -47,6 +48,7 @@ from tractor.msg import ( pretty_struct, ) from tractor.log import get_logger +# from tractor._addr import UnwrappedAddress log = get_logger('tractor.msgspec') @@ -141,9 +143,49 @@ class Aid( ''' name: str uuid: str - # TODO: use built-in support for UUIDs? - # -[ ] `uuid.UUID` which has multi-protocol support - # https://jcristharif.com/msgspec/supported-types.html#uuid + pid: int|None = None + + # TODO? can/should we extend this field set? + # -[ ] use built-in support for UUIDs? `uuid.UUID` which has + # multi-protocol support + # https://jcristharif.com/msgspec/supported-types.html#uuid + # + # -[ ] as per the `.ipc._uds` / `._addr` comments, maybe we + # should also include at least `.pid` (equiv to port for tcp) + # and/or host-part always? + + @property + def uid(self) -> tuple[str, str]: + ''' + Legacy actor "unique-id" pair format. + + ''' + return ( + self.name, + self.uuid, + ) + + def reprol( + self, + sin_uuid: bool = True, + ) -> str: + if not sin_uuid: + return ( + f'{self.name}[{self.uuid[:6]}]@{self.pid!r}' + ) + return ( + f'{self.name}@{self.pid!r}' + ) + + # mk hashable via `.uuid` + def __hash__(self) -> int: + return hash(self.uuid) + + def __eq__(self, other: Aid) -> bool: + return self.uuid == other.uuid + + # use pretty fmt since often repr-ed for console/log + __repr__ = pretty_struct.Struct.__repr__ class SpawnSpec( @@ -161,14 +203,15 @@ class SpawnSpec( # a hard `Struct` def for all of these fields! _parent_main_data: dict _runtime_vars: dict[str, Any] + # ^NOTE see `._state._runtime_vars: dict` # module import capability enable_modules: dict[str, str] # TODO: not just sockaddr pairs? # -[ ] abstract into a `TransportAddr` type? - reg_addrs: list[tuple[str, int]] - bind_addrs: list[tuple[str, int]] + reg_addrs: list[tuple[str, str|int]] + bind_addrs: list[tuple[str, str|int]]|None # TODO: caps based RPC support in the payload? 
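+    # e.g. (sketch): a TCP addr unwraps to `('127.0.0.1', 1616)` while
+    # a UDS addr unwraps to a `(filedir, filename)` pair of path
+    # strings, hence the `str|int` 2nd slot in the addr tuples above.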
diff --git a/tractor/to_asyncio.py b/tractor/to_asyncio.py index 08b1ed25..bd46a5ab 100644 --- a/tractor/to_asyncio.py +++ b/tractor/to_asyncio.py @@ -38,7 +38,6 @@ from typing import ( import tractor from tractor._exceptions import ( InternalError, - is_multi_cancelled, TrioTaskExited, TrioCancelled, AsyncioTaskExited, @@ -49,7 +48,7 @@ from tractor._state import ( _runtime_vars, ) from tractor._context import Unresolved -from tractor.devx import _debug +from tractor.devx import debug from tractor.log import ( get_logger, StackLevelAdapter, @@ -59,6 +58,9 @@ from tractor.log import ( # from tractor.msg import ( # pretty_struct, # ) +from tractor.trionics import ( + is_multi_cancelled, +) from tractor.trionics._broadcast import ( broadcast_receiver, BroadcastReceiver, @@ -128,6 +130,7 @@ class LinkedTaskChannel( _trio_task: trio.Task _aio_task_complete: trio.Event + _closed_by_aio_task: bool = False _suppress_graceful_exits: bool = True _trio_err: BaseException|None = None @@ -206,10 +209,15 @@ class LinkedTaskChannel( async def aclose(self) -> None: await self._from_aio.aclose() - def started( + # ?TODO? async version of this? + def started_nowait( self, val: Any = None, ) -> None: + ''' + Synchronize aio-side with its trio-parent. + + ''' self._aio_started_val = val return self._to_trio.send_nowait(val) @@ -240,6 +248,7 @@ class LinkedTaskChannel( # cycle on the trio side? # await trio.lowlevel.checkpoint() return await self._from_aio.receive() + except BaseException as err: async with translate_aio_errors( chan=self, @@ -317,7 +326,7 @@ def _run_asyncio_task( qsize: int = 1, provide_channels: bool = False, suppress_graceful_exits: bool = True, - hide_tb: bool = False, + hide_tb: bool = True, **kwargs, ) -> LinkedTaskChannel: @@ -345,18 +354,6 @@ def _run_asyncio_task( # value otherwise it would just return ;P assert qsize > 1 - if provide_channels: - assert 'to_trio' in args - - # allow target func to accept/stream results manually by name - if 'to_trio' in args: - kwargs['to_trio'] = to_trio - - if 'from_trio' in args: - kwargs['from_trio'] = from_trio - - coro = func(**kwargs) - trio_task: trio.Task = trio.lowlevel.current_task() trio_cs = trio.CancelScope() aio_task_complete = trio.Event() @@ -371,6 +368,25 @@ def _run_asyncio_task( _suppress_graceful_exits=suppress_graceful_exits, ) + # allow target func to accept/stream results manually by name + if 'to_trio' in args: + kwargs['to_trio'] = to_trio + + if 'from_trio' in args: + kwargs['from_trio'] = from_trio + + if 'chan' in args: + kwargs['chan'] = chan + + if provide_channels: + assert ( + 'to_trio' in args + or + 'chan' in args + ) + + coro = func(**kwargs) + async def wait_on_coro_final_result( to_trio: trio.MemorySendChannel, coro: Awaitable, @@ -443,9 +459,23 @@ def _run_asyncio_task( f'Task exited with final result: {result!r}\n' ) - # only close the sender side which will relay - # a `trio.EndOfChannel` to the trio (consumer) side. + # XXX ALWAYS close the child-`asyncio`-task-side's + # `to_trio` handle which will in turn relay + # a `trio.EndOfChannel` to the `trio`-parent. + # Consequently the parent `trio` task MUST ALWAYS + # check for any `chan._aio_err` to be raised when it + # receives an EoC. + # + # NOTE, there are 2 EoC cases, + # - normal/graceful EoC due to the aio-side actually + # terminating its "streaming", but the task did not + # error and is not yet complete. + # + # - the aio-task terminated and we specially mark the + # closure as due to the `asyncio.Task`'s exit. 
+ # to_trio.close() + chan._closed_by_aio_task = True aio_task_complete.set() log.runtime( @@ -479,12 +509,12 @@ def _run_asyncio_task( if ( debug_mode() and - (greenback := _debug.maybe_import_greenback( + (greenback := debug.maybe_import_greenback( force_reload=True, raise_not_found=False, )) ): - log.info( + log.devx( f'Bestowing `greenback` portal for `asyncio`-task\n' f'{task}\n' ) @@ -643,8 +673,9 @@ def _run_asyncio_task( not trio_cs.cancel_called ): log.cancel( - f'Cancelling `trio` side due to aio-side src exc\n' - f'{curr_aio_err}\n' + f'Cancelling trio-side due to aio-side src exc\n' + f'\n' + f'{curr_aio_err!r}\n' f'\n' f'(c>\n' f' |_{trio_task}\n' @@ -756,6 +787,7 @@ async def translate_aio_errors( aio_done_before_trio: bool = aio_task.done() assert aio_task trio_err: BaseException|None = None + eoc: trio.EndOfChannel|None = None try: yield # back to one of the cross-loop apis except trio.Cancelled as taskc: @@ -787,12 +819,48 @@ async def translate_aio_errors( # ) # raise - # XXX always passthrough EoC since this translator is often - # called from `LinkedTaskChannel.receive()` which we want - # passthrough and further we have no special meaning for it in - # terms of relaying errors or signals from the aio side! - except trio.EndOfChannel as eoc: - trio_err = chan._trio_err = eoc + # XXX EoC is a special SIGNAL from the aio-side here! + # There are 2 cases to handle: + # 1. the "EoC passthrough" case. + # - the aio-task actually closed the channel "gracefully" and + # the trio-task should unwind any ongoing channel + # iteration/receiving, + # |_this exc-translator wraps calls to `LinkedTaskChannel.receive()` + # in which case we want to relay the actual "end-of-chan" for + # iteration purposes. + # + # 2. relaying the "asyncio.Task termination" case. + # - if the aio-task terminates, maybe with an error, AND the + # `open_channel_from()` API was used, it will always signal + # that termination. + # |_`wait_on_coro_final_result()` always calls + # `to_trio.close()` when `provide_channels=True` so we need to + # always check if there is an aio-side exc which needs to be + # relayed to the parent trio side! + # |_in this case the special `chan._closed_by_aio_task` is + # ALWAYS set. + # + except trio.EndOfChannel as _eoc: + eoc = _eoc + if ( + chan._closed_by_aio_task + and + aio_err + ): + log.cancel( + f'The asyncio-child task terminated due to error\n' + f'{aio_err!r}\n' + ) + chan._trio_to_raise = aio_err + trio_err = chan._trio_err = eoc + # + # ?TODO?, raise something like a, + # chan._trio_to_raise = AsyncioErrored() + # BUT, with the tb rewritten to reflect the underlying + # call stack? + else: + trio_err = chan._trio_err = eoc + raise eoc # NOTE ALSO SEE the matching note in the `cancel_trio()` asyncio @@ -841,7 +909,7 @@ async def translate_aio_errors( except BaseException as _trio_err: trio_err = chan._trio_err = _trio_err # await tractor.pause(shield=True) # workx! 
- entered: bool = await _debug._maybe_enter_pm( + entered: bool = await debug._maybe_enter_pm( trio_err, api_frame=inspect.currentframe(), ) @@ -1045,7 +1113,7 @@ async def translate_aio_errors( # if wait_on_aio_task: await chan._aio_task_complete.wait() - log.info( + log.debug( 'asyncio-task is done and unblocked trio-side!\n' ) @@ -1062,11 +1130,17 @@ async def translate_aio_errors( trio_to_raise: ( AsyncioCancelled| AsyncioTaskExited| + Exception| # relayed from aio-task None ) = chan._trio_to_raise + raise_from: Exception = ( + trio_err if (aio_err is trio_to_raise) + else aio_err + ) + if not suppress_graceful_exits: - raise trio_to_raise from (aio_err or trio_err) + raise trio_to_raise from raise_from if trio_to_raise: match ( @@ -1099,7 +1173,7 @@ async def translate_aio_errors( ) return case _: - raise trio_to_raise from (aio_err or trio_err) + raise trio_to_raise from raise_from # Check if the asyncio-side is the cause of the trio-side # error. @@ -1165,7 +1239,6 @@ async def run_task( @acm async def open_channel_from( - target: Callable[..., Any], suppress_graceful_exits: bool = True, **target_kwargs, @@ -1199,7 +1272,6 @@ async def open_channel_from( # deliver stream handle upward yield first, chan except trio.Cancelled as taskc: - # await tractor.pause(shield=True) # ya it worx ;) if cs.cancel_called: if isinstance(chan._trio_to_raise, AsyncioCancelled): log.cancel( @@ -1406,7 +1478,7 @@ def run_as_asyncio_guest( ) # XXX make it obvi we know this isn't supported yet! assert 0 - # await _debug.maybe_init_greenback( + # await debug.maybe_init_greenback( # force_reload=True, # ) diff --git a/tractor/trionics/__init__.py b/tractor/trionics/__init__.py index df9b6f26..2e91aa30 100644 --- a/tractor/trionics/__init__.py +++ b/tractor/trionics/__init__.py @@ -31,4 +31,9 @@ from ._broadcast import ( ) from ._beg import ( collapse_eg as collapse_eg, + get_collapsed_eg as get_collapsed_eg, + is_multi_cancelled as is_multi_cancelled, +) +from ._taskc import ( + maybe_raise_from_masking_exc as maybe_raise_from_masking_exc, ) diff --git a/tractor/trionics/_beg.py b/tractor/trionics/_beg.py index 843b9f70..f466ab3c 100644 --- a/tractor/trionics/_beg.py +++ b/tractor/trionics/_beg.py @@ -15,31 +15,94 @@ # along with this program. If not, see . ''' -`BaseExceptionGroup` related utils and helpers pertaining to -first-class-`trio` from a historical perspective B) +`BaseExceptionGroup` utils and helpers pertaining to +first-class-`trio` from a "historical" perspective, like "loose +exception group" task-nurseries. ''' from contextlib import ( asynccontextmanager as acm, ) +from typing import ( + Literal, + Type, +) + +import trio +# from trio._core._concat_tb import ( +# concat_tb, +# ) -def maybe_collapse_eg( - beg: BaseExceptionGroup, +# XXX NOTE +# taken verbatim from `trio._core._run` except, +# - remove the NONSTRICT_EXCEPTIONGROUP_NOTE deprecation-note +# guard-check; we know we want an explicit collapse. +# - mask out tb rewriting in collapse case, i don't think it really +# matters? +# +def collapse_exception_group( + excgroup: BaseExceptionGroup[BaseException], ) -> BaseException: + """Recursively collapse any single-exception groups into that single contained + exception. 
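+
+    E.g. (sketch) `BaseExceptionGroup('o', [BaseExceptionGroup('i',
+    [ValueError()])])` collapses down to the lone `ValueError` instance.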
+ + """ + exceptions = list(excgroup.exceptions) + modified = False + for i, exc in enumerate(exceptions): + if isinstance(exc, BaseExceptionGroup): + new_exc = collapse_exception_group(exc) + if new_exc is not exc: + modified = True + exceptions[i] = new_exc + + if ( + len(exceptions) == 1 + and isinstance(excgroup, BaseExceptionGroup) + + # XXX trio's loose-setting condition.. + # and NONSTRICT_EXCEPTIONGROUP_NOTE in getattr(excgroup, "__notes__", ()) + ): + # exceptions[0].__traceback__ = concat_tb( + # excgroup.__traceback__, + # exceptions[0].__traceback__, + # ) + return exceptions[0] + elif modified: + return excgroup.derive(exceptions) + else: + return excgroup + + +def get_collapsed_eg( + beg: BaseExceptionGroup, + +) -> BaseException|None: ''' - If the input beg can collapse to a single non-eg sub-exception, - return it instead. + If the input beg can collapse to a single sub-exception which is + itself **not** an eg, return it. ''' - if len(excs := beg.exceptions) == 1: - return excs[0] + maybe_exc = collapse_exception_group(beg) + if maybe_exc is beg: + return None - return beg + return maybe_exc @acm -async def collapse_eg(): +async def collapse_eg( + hide_tb: bool = True, + + # XXX, for ex. will always show begs containing single taskc + ignore: set[Type[BaseException]] = { + # trio.Cancelled, + }, + add_notes: bool = True, + + bp: bool = False, +): ''' If `BaseExceptionGroup` raised in the body scope is "collapse-able" (in the same way that @@ -47,12 +110,114 @@ async def collapse_eg(): only raise the lone emedded non-eg in in place. ''' + __tracebackhide__: bool = hide_tb try: yield - except* BaseException as beg: - if ( - exc := maybe_collapse_eg(beg) - ) is not beg: - raise exc + except BaseExceptionGroup as _beg: + beg = _beg - raise beg + if ( + bp + and + len(beg.exceptions) > 1 + ): + import tractor + if tractor.current_actor( + err_on_no_runtime=False, + ): + await tractor.pause(shield=True) + else: + breakpoint() + + if ( + (exc := get_collapsed_eg(beg)) + and + type(exc) not in ignore + ): + + # TODO? report number of nested groups it was collapsed + # *from*? + if add_notes: + from_group_note: str = ( + '( ^^^ this exc was collapsed from a group ^^^ )\n' + ) + if ( + from_group_note + not in + getattr(exc, "__notes__", ()) + ): + exc.add_note(from_group_note) + + # raise exc + # ^^ this will leave the orig beg tb above with the + # "during the handling of the following.." + # So, instead do.. + # + if cause := exc.__cause__: + raise exc from cause + else: + # suppress "during handling of " + # output in tb/console. + raise exc from None + + # keep original + raise # beg + + +def is_multi_cancelled( + beg: BaseException|BaseExceptionGroup, + + ignore_nested: set[BaseException] = set(), + +) -> Literal[False]|BaseExceptionGroup: + ''' + Predicate to determine if an `BaseExceptionGroup` only contains + some (maybe nested) set of sub-grouped exceptions (like only + `trio.Cancelled`s which get swallowed silently by default) and is + thus the result of "gracefully cancelling" a collection of + sub-tasks (or other conc primitives) and receiving a "cancelled + ACK" from each after termination. 
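+
+    E.g. (sketch) a group holding only (arbitrarily nested)
+    `trio.Cancelled`s returns the matched sub-group, whereas a group
+    containing no `trio.Cancelled` (nor any other `ignore_nested`
+    type) returns `False`.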
+ + Docs: + ---- + - https://docs.python.org/3/library/exceptions.html#exception-groups + - https://docs.python.org/3/library/exceptions.html#BaseExceptionGroup.subgroup + + ''' + + if ( + not ignore_nested + or + trio.Cancelled not in ignore_nested + # XXX always count-in `trio`'s native signal + ): + ignore_nested.update({trio.Cancelled}) + + if isinstance(beg, BaseExceptionGroup): + # https://docs.python.org/3/library/exceptions.html#BaseExceptionGroup.subgroup + # |_ "The condition can be an exception type or tuple of + # exception types, in which case each exception is checked + # for a match using the same check that is used in an + # except clause. The condition can also be a callable + # (other than a type object) that accepts an exception as + # its single argument and returns true for the exceptions + # that should be in the subgroup." + matched_exc: BaseExceptionGroup|None = beg.subgroup( + tuple(ignore_nested), + + # ??TODO, complain about why not allowed to use + # named arg style calling??? + # XD .. wtf? + # condition=tuple(ignore_nested), + ) + if matched_exc is not None: + return matched_exc + + # NOTE, IFF no excs types match (throughout the error-tree) + # -> return `False`, OW return the matched sub-eg. + # + # IOW, for the inverse of ^ for the purpose of + # maybe-enter-REPL--logic: "only debug when the err-tree contains + # at least one exc-type NOT in `ignore_nested`" ; i.e. the case where + # we fallthrough and return `False` here. + return False diff --git a/tractor/trionics/_mngrs.py b/tractor/trionics/_mngrs.py index 9a5ed156..3acfbeda 100644 --- a/tractor/trionics/_mngrs.py +++ b/tractor/trionics/_mngrs.py @@ -40,6 +40,11 @@ from typing import ( import trio from tractor._state import current_actor from tractor.log import get_logger +# from ._beg import collapse_eg +# from ._taskc import ( +# maybe_raise_from_masking_exc, +# ) + if TYPE_CHECKING: from tractor import ActorNursery @@ -70,7 +75,8 @@ async def maybe_open_nursery( yield nursery else: async with lib.open_nursery(**kwargs) as nursery: - nursery.cancel_scope.shield = shield + if lib == trio: + nursery.cancel_scope.shield = shield yield nursery @@ -103,6 +109,9 @@ async def _enter_and_wait( async def gather_contexts( mngrs: Sequence[AsyncContextManager[T]], + # caller can provide their own scope + tn: trio.Nursery|None = None, + ) -> AsyncGenerator[ tuple[ T | None, @@ -111,17 +120,19 @@ async def gather_contexts( None, ]: ''' - Concurrently enter a sequence of async context managers (acms), - each from a separate `trio` task and deliver the unwrapped - `yield`-ed values in the same order once all managers have entered. + Concurrently enter a sequence of async context managers (`acm`s), + each scheduled in a separate `trio.Task` and deliver their + unwrapped `yield`-ed values in the same order once all `@acm`s + in every task have entered. - On exit, all acms are subsequently and concurrently exited. + On exit, all `acm`s are subsequently and concurrently exited with + **no order guarantees**. This function is somewhat similar to a batch of non-blocking calls to `contextlib.AsyncExitStack.enter_async_context()` (inside a loop) *in combo with* a `asyncio.gather()` to get the `.__aenter__()`-ed values, except the managers are both - concurrently entered and exited and *cancellation just works*(R). + concurrently entered and exited and *cancellation-just-works™*. 
''' seed: int = id(mngrs) @@ -141,37 +152,47 @@ async def gather_contexts( if not mngrs: raise ValueError( '`.trionics.gather_contexts()` input mngrs is empty?\n' + '\n' 'Did try to use inline generator syntax?\n' - 'Use a non-lazy iterator or sequence type intead!' + 'Check that list({mngrs}) works!\n' + # 'or sequence-type intead!\n' + # 'Use a non-lazy iterator or sequence-type intead!\n' ) - async with trio.open_nursery( - strict_exception_groups=False, - # ^XXX^ TODO? soo roll our own then ?? - # -> since we kinda want the "if only one `.exception` then - # just raise that" interface? - ) as tn: - for mngr in mngrs: - tn.start_soon( - _enter_and_wait, - mngr, - unwrapped, - all_entered, - parent_exit, - seed, - ) + try: + async with ( + # + # ?TODO, does including these (eg-collapsing, + # taskc-unmasking) improve tb noise-reduction/legibility? + # + # collapse_eg(), + maybe_open_nursery( + nursery=tn, + ) as tn, + # maybe_raise_from_masking_exc(), + ): + for mngr in mngrs: + tn.start_soon( + _enter_and_wait, + mngr, + unwrapped, + all_entered, + parent_exit, + seed, + ) - # deliver control once all managers have started up - await all_entered.wait() - - try: + # deliver control to caller once all ctx-managers have + # started (yielded back to us). + await all_entered.wait() yield tuple(unwrapped.values()) - finally: - # NOTE: this is ABSOLUTELY REQUIRED to avoid - # the following wacky bug: - # parent_exit.set() + finally: + # XXX NOTE: this is ABSOLUTELY REQUIRED to avoid + # the following wacky bug: + # + parent_exit.set() + # Per actor task caching helpers. # Further potential examples of interest: @@ -183,7 +204,7 @@ class _Cache: a kept-alive-while-in-use async resource. ''' - service_n: Optional[trio.Nursery] = None + service_tn: Optional[trio.Nursery] = None locks: dict[Hashable, trio.Lock] = {} users: int = 0 values: dict[Any, Any] = {} @@ -224,6 +245,9 @@ async def maybe_open_context( kwargs: dict = {}, key: Hashable | Callable[..., Hashable] = None, + # caller can provide their own scope + tn: trio.Nursery|None = None, + ) -> AsyncIterator[tuple[bool, T]]: ''' Maybe open an async-context-manager (acm) if there is not already @@ -256,40 +280,94 @@ async def maybe_open_context( # have it not be closed until all consumers have exited (which is # currently difficult to implement any other way besides using our # pre-allocated runtime instance..) - service_n: trio.Nursery = current_actor()._service_n + if tn: + # TODO, assert tn is eventual parent of this task! + task: trio.Task = trio.lowlevel.current_task() + task_tn: trio.Nursery = task.parent_nursery + if not tn._cancel_status.encloses( + task_tn._cancel_status + ): + raise RuntimeError( + f'Mis-nesting of task under provided {tn} !?\n' + f'Current task is NOT a child(-ish)!!\n' + f'\n' + f'task: {task}\n' + f'task_tn: {task_tn}\n' + ) + service_tn = tn + else: + service_tn: trio.Nursery = current_actor()._service_tn # TODO: is there any way to allocate # a 'stays-open-till-last-task-finshed nursery? - # service_n: trio.Nursery - # async with maybe_open_nursery(_Cache.service_n) as service_n: - # _Cache.service_n = service_n + # service_tn: trio.Nursery + # async with maybe_open_nursery(_Cache.service_tn) as service_tn: + # _Cache.service_tn = service_tn + cache_miss_ke: KeyError|None = None + maybe_taskc: trio.Cancelled|None = None try: # **critical section** that should prevent other tasks from # checking the _Cache until complete otherwise the scheduler # may switch and by accident we create more then one resource. 
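+        #
+        # - a hit on `ctx_key` falls through to the `else:` clause
+        #   below and simply re-delivers the already-entered value,
+        # - a miss raises `KeyError` and the handler allocates the
+        #   `@acm` in a task spawned on `service_tn`, always
+        #   releasing `lock` once startup completes (or fails).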
         yielded = _Cache.values[ctx_key]

-    except KeyError:
-        log.debug(f'Allocating new {acm_func} for {ctx_key}')
-        mngr = acm_func(**kwargs)
-        resources = _Cache.resources
-        assert not resources.get(ctx_key), f'Resource exists? {ctx_key}'
-        resources[ctx_key] = (service_n, trio.Event())
+    except KeyError as _ke:
+        # XXX, stay mutexed up to cache-miss yield
+        try:
+            cache_miss_ke = _ke
+            log.debug(
+                f'Allocating new @acm-func entry\n'
+                f'ctx_key={ctx_key}\n'
+                f'acm_func={acm_func}\n'
+            )
+            mngr = acm_func(**kwargs)
+            resources = _Cache.resources
+            assert not resources.get(ctx_key), f'Resource exists? {ctx_key}'
+            resources[ctx_key] = (service_tn, trio.Event())
+            yielded: Any = await service_tn.start(
+                _Cache.run_ctx,
+                mngr,
+                ctx_key,
+            )
+            _Cache.users += 1
+        finally:
+            # XXX, since this runs from an `except` it's a checkpoint
+            # which can be `trio.Cancelled`-masked.
+            #
+            # NOTE, in that case the mutex is never released by the
+            # (first and) caching task and **we can't** simply shield
+            # bc that will inf-block on the `await
+            # no_more_users.wait()`.
+            #
+            # SO just always unlock!
+            lock.release()
 
-        # sync up to the mngr's yielded value
-        yielded = await service_n.start(
-            _Cache.run_ctx,
-            mngr,
-            ctx_key,
-        )
-        _Cache.users += 1
-        lock.release()
-        yield False, yielded
+        try:
+            yield (
+                False,  # cache_hit = "no"
+                yielded,
+            )
+        except trio.Cancelled as taskc:
+            maybe_taskc = taskc
+            log.cancel(
+                f'Cancelled from cache-miss entry\n'
+                f'\n'
+                f'ctx_key: {ctx_key!r}\n'
+                f'mngr: {mngr!r}\n'
+            )
+            # XXX, always unset ke from cancelled context
+            # since we never consider it a masked exc case!
+            # - bc this can be called directly by `._rpc._invoke()`?
+            #
+            if maybe_taskc.__context__ is cache_miss_ke:
+                maybe_taskc.__context__ = None
+
+            raise taskc
 
     else:
         _Cache.users += 1
-        log.runtime(
+        log.debug(
             f'Re-using cached resource for user {_Cache.users}\n\n'
             f'{ctx_key!r} -> {type(yielded)}\n'
@@ -299,9 +377,19 @@ async def maybe_open_context(
             # f'{ctx_key!r} -> {yielded!r}\n'
         )
         lock.release()
-        yield True, yielded
+        yield (
+            True,  # cache_hit = "yes"
+            yielded,
+        )
 
     finally:
+        if lock.locked():
+            stats: trio.LockStatistics = lock.statistics()
+            log.error(
+                f'Lock left locked by last owner !?\n'
+                f'{stats}\n'
+            )
+
         _Cache.users -= 1
 
         if yielded is not None:
diff --git a/tractor/trionics/_taskc.py b/tractor/trionics/_taskc.py
new file mode 100644
index 00000000..8809524b
--- /dev/null
+++ b/tractor/trionics/_taskc.py
@@ -0,0 +1,184 @@
+# tractor: structured concurrent "actors".
+# Copyright 2018-eternity Tyler Goodlet.
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+'''
+`trio.Task` cancellation helpers, extensions and "holsters".
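+
+A rough usage sketch of the main helper defined below,
+`maybe_raise_from_masking_exc()` (illustrative only; the
+`do_stuff_that_might_mask_errors()` body is hypothetical)::
+
+    async with (
+        maybe_raise_from_masking_exc(),
+        trio.open_nursery() as tn,
+    ):
+        # any exc suppressed as the `.__context__` of a
+        # `trio.Cancelled` propagating out of this block is
+        # detected and, with the default `raise_unmasked=True`,
+        # re-raised in place of the masking cancelled.
+        await do_stuff_that_might_mask_errors(tn)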
+
+'''
+from __future__ import annotations
+from contextlib import (
+    asynccontextmanager as acm,
+)
+from typing import TYPE_CHECKING
+
+import trio
+from tractor.log import get_logger
+
+log = get_logger(__name__)
+
+
+if TYPE_CHECKING:
+    from tractor.devx.debug import BoxedMaybeException
+
+
+def find_masked_excs(
+    maybe_masker: BaseException,
+    unmask_from: set[BaseException],
+) -> BaseException|None:
+    '''
+    Deliver any `maybe_masker.__context__` provided it has a declared
+    masking exc-type entry in `unmask_from`.
+
+    '''
+    if (
+        type(maybe_masker) in unmask_from
+        and
+        (exc_ctx := maybe_masker.__context__)
+
+        # TODO? what about any cases where
+        # they could be the same type but not same instance?
+        # |_i.e. a cancel masking a cancel ??
+        # or (
+        #     exc_ctx is not maybe_masker
+        # )
+    ):
+        return exc_ctx
+
+    return None
+
+
+# XXX, relevant discussion @ `trio`-core,
+# https://github.com/python-trio/trio/issues/455
+#
+@acm
+async def maybe_raise_from_masking_exc(
+    tn: trio.Nursery|None = None,
+    unmask_from: (
+        BaseException|
+        tuple[BaseException]
+    ) = (trio.Cancelled,),
+
+    raise_unmasked: bool = True,
+    extra_note: str = (
+        'This can occur when,\n'
+        ' - a `trio.Nursery` scope embeds a `finally:`-block '
+        'which executes a checkpoint!'
+        #
+        # ^TODO? other cases?
+    ),
+
+    always_warn_on: tuple[BaseException] = (
+        trio.Cancelled,
+    ),
+    # ^XXX, special case(s) where we warn-log bc likely
+    # there will be no operational diff since the exc
+    # is always expected to be consumed.
+) -> BoxedMaybeException:
+    '''
+    Maybe un-mask and re-raise exception(s) suppressed by a known
+    error-used-as-signal type (cough namely `trio.Cancelled`).
+
+    Though this unmasker targets cancelleds, it can be used more
+    generally to capture and unwrap masked excs detected as
+    `.__context__` values which were suppressed by any error type
+    passed in `unmask_from`.
+
+    -------------
+    STILL-TODO ??
+    -------------
+    -[ ] support for egs which have multiple masked entries in
+        `maybe_eg.exceptions`, in which case we should unmask the
+        individual sub-excs but maintain the eg-parent's form right?
+
+    '''
+    from tractor.devx.debug import (
+        BoxedMaybeException,
+        pause,
+    )
+    boxed_maybe_exc = BoxedMaybeException(
+        raise_on_exit=raise_unmasked,
+    )
+    matching: list[BaseException]|None = None
+    maybe_eg: ExceptionGroup|None
+
+    if tn:
+        try: # handle egs
+            yield boxed_maybe_exc
+            return
+        except* unmask_from as _maybe_eg:
+            maybe_eg = _maybe_eg
+            matches: ExceptionGroup
+            matches, _ = maybe_eg.split(
+                unmask_from
+            )
+            if not matches:
+                raise
+
+            matching: list[BaseException] = matches.exceptions
+    else:
+        try: # handle non-egs
+            yield boxed_maybe_exc
+            return
+        except unmask_from as _maybe_exc:
+            maybe_exc = _maybe_exc
+            matching: list[BaseException] = [
+                maybe_exc
+            ]
+
+    # XXX, only unmasked for debugging!
+    # TODO, remove eventually..
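+    # (the catch-all below exists purely so any *unexpected* exc can
+    # be inspected in the REPL via the shielded `pause()` before
+    # being re-raised unchanged.)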
+ except BaseException as _berr: + berr = _berr + await pause(shield=True) + raise berr + + if matching is None: + raise + + masked: list[tuple[BaseException, BaseException]] = [] + for exc_match in matching: + + if exc_ctx := find_masked_excs( + maybe_masker=exc_match, + unmask_from={unmask_from}, + ): + masked.append((exc_ctx, exc_match)) + boxed_maybe_exc.value = exc_match + note: str = ( + f'\n' + f'^^WARNING^^ the above {exc_ctx!r} was masked by a {unmask_from!r}\n' + ) + if extra_note: + note += ( + f'\n' + f'{extra_note}\n' + ) + exc_ctx.add_note(note) + + if type(exc_match) in always_warn_on: + log.warning(note) + + # await tractor.pause(shield=True) + if raise_unmasked: + + if len(masked) < 2: + raise exc_ctx from exc_match + else: + # ?TODO, see above but, possibly unmasking sub-exc + # entries if there are > 1 + await pause(shield=True) + else: + raise diff --git a/uv.lock b/uv.lock index e1c409f5..3c05dc2f 100644 --- a/uv.lock +++ b/uv.lock @@ -1,14 +1,23 @@ version = 1 -revision = 1 +revision = 2 requires-python = ">=3.11" [[package]] name = "attrs" version = "24.3.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/48/c8/6260f8ccc11f0917360fc0da435c5c9c7504e3db174d5a12a1494887b045/attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff", size = 805984 } +sdist = { url = "https://files.pythonhosted.org/packages/48/c8/6260f8ccc11f0917360fc0da435c5c9c7504e3db174d5a12a1494887b045/attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff", size = 805984, upload-time = "2024-12-16T06:59:29.899Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/89/aa/ab0f7891a01eeb2d2e338ae8fecbe57fcebea1a24dbb64d45801bfab481d/attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308", size = 63397 }, + { url = "https://files.pythonhosted.org/packages/89/aa/ab0f7891a01eeb2d2e338ae8fecbe57fcebea1a24dbb64d45801bfab481d/attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308", size = 63397, upload-time = "2024-12-16T06:59:26.977Z" }, +] + +[[package]] +name = "bidict" +version = "0.23.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9a/6e/026678aa5a830e07cd9498a05d3e7e650a4f56a42f267a53d22bcda1bdc9/bidict-0.23.1.tar.gz", hash = "sha256:03069d763bc387bbd20e7d49914e75fc4132a41937fa3405417e1a5a2d006d71", size = 29093, upload-time = "2024-02-18T19:09:05.748Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/37/e8730c3587a65eb5645d4aba2d27aae48e8003614d6aaf15dda67f702f1f/bidict-0.23.1-py3-none-any.whl", hash = "sha256:5dae8d4d79b552a71cbabc7deb25dfe8ce710b17ff41711e13010ead2abfc3e5", size = 32764, upload-time = "2024-02-18T19:09:04.156Z" }, ] [[package]] @@ -18,23 +27,51 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pycparser" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621 } +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = 
"2024-09-04T20:45:21.852Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727 }, - { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400 }, - { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, - { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, - { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, - { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, + { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264, upload-time = "2024-09-04T20:43:51.124Z" }, + { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651, upload-time = "2024-09-04T20:43:52.872Z" }, + { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259, upload-time = "2024-09-04T20:43:56.123Z" }, + { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200, upload-time = "2024-09-04T20:43:57.891Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235, upload-time = "2024-09-04T20:44:00.18Z" }, + { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721, upload-time = "2024-09-04T20:44:01.585Z" }, + { 
url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242, upload-time = "2024-09-04T20:44:03.467Z" }, + { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999, upload-time = "2024-09-04T20:44:05.023Z" }, + { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242, upload-time = "2024-09-04T20:44:06.444Z" }, + { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604, upload-time = "2024-09-04T20:44:08.206Z" }, + { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727, upload-time = "2024-09-04T20:44:09.481Z" }, + { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400, upload-time = "2024-09-04T20:44:10.873Z" }, + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" }, + { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" }, + { url = 
"https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, upload-time = "2024-09-04T20:44:23.245Z" }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989, upload-time = "2024-09-04T20:44:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802, upload-time = "2024-09-04T20:44:30.289Z" }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792, upload-time = "2024-09-04T20:44:32.01Z" }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893, upload-time = "2024-09-04T20:44:33.606Z" }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810, upload-time = "2024-09-04T20:44:35.191Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200, upload-time = "2024-09-04T20:44:36.743Z" }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447, upload-time = "2024-09-04T20:44:38.492Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358, upload-time = "2024-09-04T20:44:40.046Z" }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469, upload-time = "2024-09-04T20:44:41.616Z" }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" }, ] [[package]] name = "colorama" version = "0.4.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] [[package]] @@ -44,9 +81,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d3/7a/359f4d5df2353f26172b3cc39ea32daa39af8de522205f512f458923e677/colorlog-6.9.0.tar.gz", hash = "sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2", size = 16624 } +sdist = { url = 
"https://files.pythonhosted.org/packages/d3/7a/359f4d5df2353f26172b3cc39ea32daa39af8de522205f512f458923e677/colorlog-6.9.0.tar.gz", hash = "sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2", size = 16624, upload-time = "2024-10-29T18:34:51.011Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e3/51/9b208e85196941db2f0654ad0357ca6388ab3ed67efdbfc799f35d1f83aa/colorlog-6.9.0-py3-none-any.whl", hash = "sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff", size = 11424 }, + { url = "https://files.pythonhosted.org/packages/e3/51/9b208e85196941db2f0654ad0357ca6388ab3ed67efdbfc799f35d1f83aa/colorlog-6.9.0-py3-none-any.whl", hash = "sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff", size = 11424, upload-time = "2024-10-29T18:34:49.815Z" }, ] [[package]] @@ -58,98 +95,98 @@ dependencies = [ { name = "outcome" }, { name = "sniffio" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/dc/c1/ab3a42c0f3ed56df9cd33de1539b3198d98c6ccbaf88a73d6be0b72d85e0/greenback-1.2.1.tar.gz", hash = "sha256:de3ca656885c03b96dab36079f3de74bb5ba061da9bfe3bb69dccc866ef95ea3", size = 42597 } +sdist = { url = "https://files.pythonhosted.org/packages/dc/c1/ab3a42c0f3ed56df9cd33de1539b3198d98c6ccbaf88a73d6be0b72d85e0/greenback-1.2.1.tar.gz", hash = "sha256:de3ca656885c03b96dab36079f3de74bb5ba061da9bfe3bb69dccc866ef95ea3", size = 42597, upload-time = "2024-02-20T21:23:13.239Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/71/d0/b8dc79d5ecfffacad9c844b6ae76b9c6259935796d3c561deccbf8fa421d/greenback-1.2.1-py3-none-any.whl", hash = "sha256:98768edbbe4340091a9730cf64a683fcbaa3f2cb81e4ac41d7ed28d3b6f74b79", size = 28062 }, + { url = "https://files.pythonhosted.org/packages/71/d0/b8dc79d5ecfffacad9c844b6ae76b9c6259935796d3c561deccbf8fa421d/greenback-1.2.1-py3-none-any.whl", hash = "sha256:98768edbbe4340091a9730cf64a683fcbaa3f2cb81e4ac41d7ed28d3b6f74b79", size = 28062, upload-time = "2024-02-20T21:23:12.031Z" }, ] [[package]] name = "greenlet" version = "3.1.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2f/ff/df5fede753cc10f6a5be0931204ea30c35fa2f2ea7a35b25bdaf4fe40e46/greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467", size = 186022 } +sdist = { url = "https://files.pythonhosted.org/packages/2f/ff/df5fede753cc10f6a5be0931204ea30c35fa2f2ea7a35b25bdaf4fe40e46/greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467", size = 186022, upload-time = "2024-09-20T18:21:04.506Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/28/62/1c2665558618553c42922ed47a4e6d6527e2fa3516a8256c2f431c5d0441/greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70", size = 272479 }, - { url = "https://files.pythonhosted.org/packages/76/9d/421e2d5f07285b6e4e3a676b016ca781f63cfe4a0cd8eaecf3fd6f7a71ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159", size = 640404 }, - { url = "https://files.pythonhosted.org/packages/e5/de/6e05f5c59262a584e502dd3d261bbdd2c97ab5416cc9c0b91ea38932a901/greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e", size = 652813 }, - { url = 
"https://files.pythonhosted.org/packages/49/93/d5f93c84241acdea15a8fd329362c2c71c79e1a507c3f142a5d67ea435ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1", size = 648517 }, - { url = "https://files.pythonhosted.org/packages/15/85/72f77fc02d00470c86a5c982b8daafdf65d38aefbbe441cebff3bf7037fc/greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383", size = 647831 }, - { url = "https://files.pythonhosted.org/packages/f7/4b/1c9695aa24f808e156c8f4813f685d975ca73c000c2a5056c514c64980f6/greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a", size = 602413 }, - { url = "https://files.pythonhosted.org/packages/76/70/ad6e5b31ef330f03b12559d19fda2606a522d3849cde46b24f223d6d1619/greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511", size = 1129619 }, - { url = "https://files.pythonhosted.org/packages/f4/fb/201e1b932e584066e0f0658b538e73c459b34d44b4bd4034f682423bc801/greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395", size = 1155198 }, - { url = "https://files.pythonhosted.org/packages/12/da/b9ed5e310bb8b89661b80cbcd4db5a067903bbcd7fc854923f5ebb4144f0/greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39", size = 298930 }, - { url = "https://files.pythonhosted.org/packages/7d/ec/bad1ac26764d26aa1353216fcbfa4670050f66d445448aafa227f8b16e80/greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d", size = 274260 }, - { url = "https://files.pythonhosted.org/packages/66/d4/c8c04958870f482459ab5956c2942c4ec35cac7fe245527f1039837c17a9/greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79", size = 649064 }, - { url = "https://files.pythonhosted.org/packages/51/41/467b12a8c7c1303d20abcca145db2be4e6cd50a951fa30af48b6ec607581/greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa", size = 663420 }, - { url = "https://files.pythonhosted.org/packages/27/8f/2a93cd9b1e7107d5c7b3b7816eeadcac2ebcaf6d6513df9abaf0334777f6/greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441", size = 658035 }, - { url = "https://files.pythonhosted.org/packages/57/5c/7c6f50cb12be092e1dccb2599be5a942c3416dbcfb76efcf54b3f8be4d8d/greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36", size = 660105 }, - { url = "https://files.pythonhosted.org/packages/f1/66/033e58a50fd9ec9df00a8671c74f1f3a320564c6415a4ed82a1c651654ba/greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9", size = 613077 }, - { url = 
"https://files.pythonhosted.org/packages/19/c5/36384a06f748044d06bdd8776e231fadf92fc896bd12cb1c9f5a1bda9578/greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0", size = 1135975 }, - { url = "https://files.pythonhosted.org/packages/38/f9/c0a0eb61bdf808d23266ecf1d63309f0e1471f284300ce6dac0ae1231881/greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942", size = 1163955 }, - { url = "https://files.pythonhosted.org/packages/43/21/a5d9df1d21514883333fc86584c07c2b49ba7c602e670b174bd73cfc9c7f/greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01", size = 299655 }, - { url = "https://files.pythonhosted.org/packages/f3/57/0db4940cd7bb461365ca8d6fd53e68254c9dbbcc2b452e69d0d41f10a85e/greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1", size = 272990 }, - { url = "https://files.pythonhosted.org/packages/1c/ec/423d113c9f74e5e402e175b157203e9102feeb7088cee844d735b28ef963/greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff", size = 649175 }, - { url = "https://files.pythonhosted.org/packages/a9/46/ddbd2db9ff209186b7b7c621d1432e2f21714adc988703dbdd0e65155c77/greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a", size = 663425 }, - { url = "https://files.pythonhosted.org/packages/bc/f9/9c82d6b2b04aa37e38e74f0c429aece5eeb02bab6e3b98e7db89b23d94c6/greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e", size = 657736 }, - { url = "https://files.pythonhosted.org/packages/d9/42/b87bc2a81e3a62c3de2b0d550bf91a86939442b7ff85abb94eec3fc0e6aa/greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4", size = 660347 }, - { url = "https://files.pythonhosted.org/packages/37/fa/71599c3fd06336cdc3eac52e6871cfebab4d9d70674a9a9e7a482c318e99/greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e", size = 615583 }, - { url = "https://files.pythonhosted.org/packages/4e/96/e9ef85de031703ee7a4483489b40cf307f93c1824a02e903106f2ea315fe/greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1", size = 1133039 }, - { url = "https://files.pythonhosted.org/packages/87/76/b2b6362accd69f2d1889db61a18c94bc743e961e3cab344c2effaa4b4a25/greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c", size = 1160716 }, - { url = "https://files.pythonhosted.org/packages/1f/1b/54336d876186920e185066d8c3024ad55f21d7cc3683c856127ddb7b13ce/greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761", size = 299490 }, - { url = "https://files.pythonhosted.org/packages/5f/17/bea55bf36990e1638a2af5ba10c1640273ef20f627962cf97107f1e5d637/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011", size = 643731 }, - { url = "https://files.pythonhosted.org/packages/78/d2/aa3d2157f9ab742a08e0fd8f77d4699f37c22adfbfeb0c610a186b5f75e0/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13", size = 649304 }, - { url = "https://files.pythonhosted.org/packages/f1/8e/d0aeffe69e53ccff5a28fa86f07ad1d2d2d6537a9506229431a2a02e2f15/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475", size = 646537 }, - { url = "https://files.pythonhosted.org/packages/05/79/e15408220bbb989469c8871062c97c6c9136770657ba779711b90870d867/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b", size = 642506 }, - { url = "https://files.pythonhosted.org/packages/18/87/470e01a940307796f1d25f8167b551a968540fbe0551c0ebb853cb527dd6/greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822", size = 602753 }, - { url = "https://files.pythonhosted.org/packages/e2/72/576815ba674eddc3c25028238f74d7b8068902b3968cbe456771b166455e/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01", size = 1122731 }, - { url = "https://files.pythonhosted.org/packages/ac/38/08cc303ddddc4b3d7c628c3039a61a3aae36c241ed01393d00c2fd663473/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6", size = 1142112 }, + { url = "https://files.pythonhosted.org/packages/28/62/1c2665558618553c42922ed47a4e6d6527e2fa3516a8256c2f431c5d0441/greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70", size = 272479, upload-time = "2024-09-20T17:07:22.332Z" }, + { url = "https://files.pythonhosted.org/packages/76/9d/421e2d5f07285b6e4e3a676b016ca781f63cfe4a0cd8eaecf3fd6f7a71ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159", size = 640404, upload-time = "2024-09-20T17:36:45.588Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/6e05f5c59262a584e502dd3d261bbdd2c97ab5416cc9c0b91ea38932a901/greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e", size = 652813, upload-time = "2024-09-20T17:39:19.052Z" }, + { url = "https://files.pythonhosted.org/packages/49/93/d5f93c84241acdea15a8fd329362c2c71c79e1a507c3f142a5d67ea435ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1", size = 648517, upload-time = "2024-09-20T17:44:24.101Z" }, + { url = "https://files.pythonhosted.org/packages/15/85/72f77fc02d00470c86a5c982b8daafdf65d38aefbbe441cebff3bf7037fc/greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383", size = 647831, upload-time = "2024-09-20T17:08:40.577Z" }, + { url = 
"https://files.pythonhosted.org/packages/f7/4b/1c9695aa24f808e156c8f4813f685d975ca73c000c2a5056c514c64980f6/greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a", size = 602413, upload-time = "2024-09-20T17:08:31.728Z" }, + { url = "https://files.pythonhosted.org/packages/76/70/ad6e5b31ef330f03b12559d19fda2606a522d3849cde46b24f223d6d1619/greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511", size = 1129619, upload-time = "2024-09-20T17:44:14.222Z" }, + { url = "https://files.pythonhosted.org/packages/f4/fb/201e1b932e584066e0f0658b538e73c459b34d44b4bd4034f682423bc801/greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395", size = 1155198, upload-time = "2024-09-20T17:09:23.903Z" }, + { url = "https://files.pythonhosted.org/packages/12/da/b9ed5e310bb8b89661b80cbcd4db5a067903bbcd7fc854923f5ebb4144f0/greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39", size = 298930, upload-time = "2024-09-20T17:25:18.656Z" }, + { url = "https://files.pythonhosted.org/packages/7d/ec/bad1ac26764d26aa1353216fcbfa4670050f66d445448aafa227f8b16e80/greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d", size = 274260, upload-time = "2024-09-20T17:08:07.301Z" }, + { url = "https://files.pythonhosted.org/packages/66/d4/c8c04958870f482459ab5956c2942c4ec35cac7fe245527f1039837c17a9/greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79", size = 649064, upload-time = "2024-09-20T17:36:47.628Z" }, + { url = "https://files.pythonhosted.org/packages/51/41/467b12a8c7c1303d20abcca145db2be4e6cd50a951fa30af48b6ec607581/greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa", size = 663420, upload-time = "2024-09-20T17:39:21.258Z" }, + { url = "https://files.pythonhosted.org/packages/27/8f/2a93cd9b1e7107d5c7b3b7816eeadcac2ebcaf6d6513df9abaf0334777f6/greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441", size = 658035, upload-time = "2024-09-20T17:44:26.501Z" }, + { url = "https://files.pythonhosted.org/packages/57/5c/7c6f50cb12be092e1dccb2599be5a942c3416dbcfb76efcf54b3f8be4d8d/greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36", size = 660105, upload-time = "2024-09-20T17:08:42.048Z" }, + { url = "https://files.pythonhosted.org/packages/f1/66/033e58a50fd9ec9df00a8671c74f1f3a320564c6415a4ed82a1c651654ba/greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9", size = 613077, upload-time = "2024-09-20T17:08:33.707Z" }, + { url = "https://files.pythonhosted.org/packages/19/c5/36384a06f748044d06bdd8776e231fadf92fc896bd12cb1c9f5a1bda9578/greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0", size = 1135975, upload-time = 
"2024-09-20T17:44:15.989Z" }, + { url = "https://files.pythonhosted.org/packages/38/f9/c0a0eb61bdf808d23266ecf1d63309f0e1471f284300ce6dac0ae1231881/greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942", size = 1163955, upload-time = "2024-09-20T17:09:25.539Z" }, + { url = "https://files.pythonhosted.org/packages/43/21/a5d9df1d21514883333fc86584c07c2b49ba7c602e670b174bd73cfc9c7f/greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01", size = 299655, upload-time = "2024-09-20T17:21:22.427Z" }, + { url = "https://files.pythonhosted.org/packages/f3/57/0db4940cd7bb461365ca8d6fd53e68254c9dbbcc2b452e69d0d41f10a85e/greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1", size = 272990, upload-time = "2024-09-20T17:08:26.312Z" }, + { url = "https://files.pythonhosted.org/packages/1c/ec/423d113c9f74e5e402e175b157203e9102feeb7088cee844d735b28ef963/greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff", size = 649175, upload-time = "2024-09-20T17:36:48.983Z" }, + { url = "https://files.pythonhosted.org/packages/a9/46/ddbd2db9ff209186b7b7c621d1432e2f21714adc988703dbdd0e65155c77/greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a", size = 663425, upload-time = "2024-09-20T17:39:22.705Z" }, + { url = "https://files.pythonhosted.org/packages/bc/f9/9c82d6b2b04aa37e38e74f0c429aece5eeb02bab6e3b98e7db89b23d94c6/greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e", size = 657736, upload-time = "2024-09-20T17:44:28.544Z" }, + { url = "https://files.pythonhosted.org/packages/d9/42/b87bc2a81e3a62c3de2b0d550bf91a86939442b7ff85abb94eec3fc0e6aa/greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4", size = 660347, upload-time = "2024-09-20T17:08:45.56Z" }, + { url = "https://files.pythonhosted.org/packages/37/fa/71599c3fd06336cdc3eac52e6871cfebab4d9d70674a9a9e7a482c318e99/greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e", size = 615583, upload-time = "2024-09-20T17:08:36.85Z" }, + { url = "https://files.pythonhosted.org/packages/4e/96/e9ef85de031703ee7a4483489b40cf307f93c1824a02e903106f2ea315fe/greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1", size = 1133039, upload-time = "2024-09-20T17:44:18.287Z" }, + { url = "https://files.pythonhosted.org/packages/87/76/b2b6362accd69f2d1889db61a18c94bc743e961e3cab344c2effaa4b4a25/greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c", size = 1160716, upload-time = "2024-09-20T17:09:27.112Z" }, + { url = "https://files.pythonhosted.org/packages/1f/1b/54336d876186920e185066d8c3024ad55f21d7cc3683c856127ddb7b13ce/greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761", size = 299490, upload-time = 
"2024-09-20T17:17:09.501Z" }, + { url = "https://files.pythonhosted.org/packages/5f/17/bea55bf36990e1638a2af5ba10c1640273ef20f627962cf97107f1e5d637/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011", size = 643731, upload-time = "2024-09-20T17:36:50.376Z" }, + { url = "https://files.pythonhosted.org/packages/78/d2/aa3d2157f9ab742a08e0fd8f77d4699f37c22adfbfeb0c610a186b5f75e0/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13", size = 649304, upload-time = "2024-09-20T17:39:24.55Z" }, + { url = "https://files.pythonhosted.org/packages/f1/8e/d0aeffe69e53ccff5a28fa86f07ad1d2d2d6537a9506229431a2a02e2f15/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475", size = 646537, upload-time = "2024-09-20T17:44:31.102Z" }, + { url = "https://files.pythonhosted.org/packages/05/79/e15408220bbb989469c8871062c97c6c9136770657ba779711b90870d867/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b", size = 642506, upload-time = "2024-09-20T17:08:47.852Z" }, + { url = "https://files.pythonhosted.org/packages/18/87/470e01a940307796f1d25f8167b551a968540fbe0551c0ebb853cb527dd6/greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822", size = 602753, upload-time = "2024-09-20T17:08:38.079Z" }, + { url = "https://files.pythonhosted.org/packages/e2/72/576815ba674eddc3c25028238f74d7b8068902b3968cbe456771b166455e/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01", size = 1122731, upload-time = "2024-09-20T17:44:20.556Z" }, + { url = "https://files.pythonhosted.org/packages/ac/38/08cc303ddddc4b3d7c628c3039a61a3aae36c241ed01393d00c2fd663473/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6", size = 1142112, upload-time = "2024-09-20T17:09:28.753Z" }, ] [[package]] name = "idna" version = "3.10" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, ] [[package]] name = "iniconfig" version = 
"2.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646, upload-time = "2023-01-07T11:08:11.254Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892, upload-time = "2023-01-07T11:08:09.864Z" }, ] [[package]] name = "msgspec" version = "0.19.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cf/9b/95d8ce458462b8b71b8a70fa94563b2498b89933689f3a7b8911edfae3d7/msgspec-0.19.0.tar.gz", hash = "sha256:604037e7cd475345848116e89c553aa9a233259733ab51986ac924ab1b976f8e", size = 216934 } +sdist = { url = "https://files.pythonhosted.org/packages/cf/9b/95d8ce458462b8b71b8a70fa94563b2498b89933689f3a7b8911edfae3d7/msgspec-0.19.0.tar.gz", hash = "sha256:604037e7cd475345848116e89c553aa9a233259733ab51986ac924ab1b976f8e", size = 216934, upload-time = "2024-12-27T17:40:28.597Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/24/d4/2ec2567ac30dab072cce3e91fb17803c52f0a37aab6b0c24375d2b20a581/msgspec-0.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aa77046904db764b0462036bc63ef71f02b75b8f72e9c9dd4c447d6da1ed8f8e", size = 187939 }, - { url = "https://files.pythonhosted.org/packages/2b/c0/18226e4328897f4f19875cb62bb9259fe47e901eade9d9376ab5f251a929/msgspec-0.19.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:047cfa8675eb3bad68722cfe95c60e7afabf84d1bd8938979dd2b92e9e4a9551", size = 182202 }, - { url = "https://files.pythonhosted.org/packages/81/25/3a4b24d468203d8af90d1d351b77ea3cffb96b29492855cf83078f16bfe4/msgspec-0.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e78f46ff39a427e10b4a61614a2777ad69559cc8d603a7c05681f5a595ea98f7", size = 209029 }, - { url = "https://files.pythonhosted.org/packages/85/2e/db7e189b57901955239f7689b5dcd6ae9458637a9c66747326726c650523/msgspec-0.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c7adf191e4bd3be0e9231c3b6dc20cf1199ada2af523885efc2ed218eafd011", size = 210682 }, - { url = "https://files.pythonhosted.org/packages/03/97/7c8895c9074a97052d7e4a1cc1230b7b6e2ca2486714eb12c3f08bb9d284/msgspec-0.19.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f04cad4385e20be7c7176bb8ae3dca54a08e9756cfc97bcdb4f18560c3042063", size = 214003 }, - { url = "https://files.pythonhosted.org/packages/61/61/e892997bcaa289559b4d5869f066a8021b79f4bf8e955f831b095f47a4cd/msgspec-0.19.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:45c8fb410670b3b7eb884d44a75589377c341ec1392b778311acdbfa55187716", size = 216833 }, - { url = 
"https://files.pythonhosted.org/packages/ce/3d/71b2dffd3a1c743ffe13296ff701ee503feaebc3f04d0e75613b6563c374/msgspec-0.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:70eaef4934b87193a27d802534dc466778ad8d536e296ae2f9334e182ac27b6c", size = 186184 }, - { url = "https://files.pythonhosted.org/packages/b2/5f/a70c24f075e3e7af2fae5414c7048b0e11389685b7f717bb55ba282a34a7/msgspec-0.19.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f98bd8962ad549c27d63845b50af3f53ec468b6318400c9f1adfe8b092d7b62f", size = 190485 }, - { url = "https://files.pythonhosted.org/packages/89/b0/1b9763938cfae12acf14b682fcf05c92855974d921a5a985ecc197d1c672/msgspec-0.19.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:43bbb237feab761b815ed9df43b266114203f53596f9b6e6f00ebd79d178cdf2", size = 183910 }, - { url = "https://files.pythonhosted.org/packages/87/81/0c8c93f0b92c97e326b279795f9c5b956c5a97af28ca0fbb9fd86c83737a/msgspec-0.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cfc033c02c3e0aec52b71710d7f84cb3ca5eb407ab2ad23d75631153fdb1f12", size = 210633 }, - { url = "https://files.pythonhosted.org/packages/d0/ef/c5422ce8af73928d194a6606f8ae36e93a52fd5e8df5abd366903a5ca8da/msgspec-0.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d911c442571605e17658ca2b416fd8579c5050ac9adc5e00c2cb3126c97f73bc", size = 213594 }, - { url = "https://files.pythonhosted.org/packages/19/2b/4137bc2ed45660444842d042be2cf5b18aa06efd2cda107cff18253b9653/msgspec-0.19.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:757b501fa57e24896cf40a831442b19a864f56d253679f34f260dcb002524a6c", size = 214053 }, - { url = "https://files.pythonhosted.org/packages/9d/e6/8ad51bdc806aac1dc501e8fe43f759f9ed7284043d722b53323ea421c360/msgspec-0.19.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5f0f65f29b45e2816d8bded36e6b837a4bf5fb60ec4bc3c625fa2c6da4124537", size = 219081 }, - { url = "https://files.pythonhosted.org/packages/b1/ef/27dd35a7049c9a4f4211c6cd6a8c9db0a50647546f003a5867827ec45391/msgspec-0.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:067f0de1c33cfa0b6a8206562efdf6be5985b988b53dd244a8e06f993f27c8c0", size = 187467 }, - { url = "https://files.pythonhosted.org/packages/3c/cb/2842c312bbe618d8fefc8b9cedce37f773cdc8fa453306546dba2c21fd98/msgspec-0.19.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f12d30dd6266557aaaf0aa0f9580a9a8fbeadfa83699c487713e355ec5f0bd86", size = 190498 }, - { url = "https://files.pythonhosted.org/packages/58/95/c40b01b93465e1a5f3b6c7d91b10fb574818163740cc3acbe722d1e0e7e4/msgspec-0.19.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82b2c42c1b9ebc89e822e7e13bbe9d17ede0c23c187469fdd9505afd5a481314", size = 183950 }, - { url = "https://files.pythonhosted.org/packages/e8/f0/5b764e066ce9aba4b70d1db8b087ea66098c7c27d59b9dd8a3532774d48f/msgspec-0.19.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19746b50be214a54239aab822964f2ac81e38b0055cca94808359d779338c10e", size = 210647 }, - { url = "https://files.pythonhosted.org/packages/9d/87/bc14f49bc95c4cb0dd0a8c56028a67c014ee7e6818ccdce74a4862af259b/msgspec-0.19.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60ef4bdb0ec8e4ad62e5a1f95230c08efb1f64f32e6e8dd2ced685bcc73858b5", size = 213563 }, - { url = "https://files.pythonhosted.org/packages/53/2f/2b1c2b056894fbaa975f68f81e3014bb447516a8b010f1bed3fb0e016ed7/msgspec-0.19.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:ac7f7c377c122b649f7545810c6cd1b47586e3aa3059126ce3516ac7ccc6a6a9", size = 213996 }, - { url = "https://files.pythonhosted.org/packages/aa/5a/4cd408d90d1417e8d2ce6a22b98a6853c1b4d7cb7669153e4424d60087f6/msgspec-0.19.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5bc1472223a643f5ffb5bf46ccdede7f9795078194f14edd69e3aab7020d327", size = 219087 }, - { url = "https://files.pythonhosted.org/packages/23/d8/f15b40611c2d5753d1abb0ca0da0c75348daf1252220e5dda2867bd81062/msgspec-0.19.0-cp313-cp313-win_amd64.whl", hash = "sha256:317050bc0f7739cb30d257ff09152ca309bf5a369854bbf1e57dffc310c1f20f", size = 187432 }, + { url = "https://files.pythonhosted.org/packages/24/d4/2ec2567ac30dab072cce3e91fb17803c52f0a37aab6b0c24375d2b20a581/msgspec-0.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aa77046904db764b0462036bc63ef71f02b75b8f72e9c9dd4c447d6da1ed8f8e", size = 187939, upload-time = "2024-12-27T17:39:32.347Z" }, + { url = "https://files.pythonhosted.org/packages/2b/c0/18226e4328897f4f19875cb62bb9259fe47e901eade9d9376ab5f251a929/msgspec-0.19.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:047cfa8675eb3bad68722cfe95c60e7afabf84d1bd8938979dd2b92e9e4a9551", size = 182202, upload-time = "2024-12-27T17:39:33.633Z" }, + { url = "https://files.pythonhosted.org/packages/81/25/3a4b24d468203d8af90d1d351b77ea3cffb96b29492855cf83078f16bfe4/msgspec-0.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e78f46ff39a427e10b4a61614a2777ad69559cc8d603a7c05681f5a595ea98f7", size = 209029, upload-time = "2024-12-27T17:39:35.023Z" }, + { url = "https://files.pythonhosted.org/packages/85/2e/db7e189b57901955239f7689b5dcd6ae9458637a9c66747326726c650523/msgspec-0.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c7adf191e4bd3be0e9231c3b6dc20cf1199ada2af523885efc2ed218eafd011", size = 210682, upload-time = "2024-12-27T17:39:36.384Z" }, + { url = "https://files.pythonhosted.org/packages/03/97/7c8895c9074a97052d7e4a1cc1230b7b6e2ca2486714eb12c3f08bb9d284/msgspec-0.19.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f04cad4385e20be7c7176bb8ae3dca54a08e9756cfc97bcdb4f18560c3042063", size = 214003, upload-time = "2024-12-27T17:39:39.097Z" }, + { url = "https://files.pythonhosted.org/packages/61/61/e892997bcaa289559b4d5869f066a8021b79f4bf8e955f831b095f47a4cd/msgspec-0.19.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:45c8fb410670b3b7eb884d44a75589377c341ec1392b778311acdbfa55187716", size = 216833, upload-time = "2024-12-27T17:39:41.203Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3d/71b2dffd3a1c743ffe13296ff701ee503feaebc3f04d0e75613b6563c374/msgspec-0.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:70eaef4934b87193a27d802534dc466778ad8d536e296ae2f9334e182ac27b6c", size = 186184, upload-time = "2024-12-27T17:39:43.702Z" }, + { url = "https://files.pythonhosted.org/packages/b2/5f/a70c24f075e3e7af2fae5414c7048b0e11389685b7f717bb55ba282a34a7/msgspec-0.19.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f98bd8962ad549c27d63845b50af3f53ec468b6318400c9f1adfe8b092d7b62f", size = 190485, upload-time = "2024-12-27T17:39:44.974Z" }, + { url = "https://files.pythonhosted.org/packages/89/b0/1b9763938cfae12acf14b682fcf05c92855974d921a5a985ecc197d1c672/msgspec-0.19.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:43bbb237feab761b815ed9df43b266114203f53596f9b6e6f00ebd79d178cdf2", size = 183910, upload-time = "2024-12-27T17:39:46.401Z" }, + { url = 
"https://files.pythonhosted.org/packages/87/81/0c8c93f0b92c97e326b279795f9c5b956c5a97af28ca0fbb9fd86c83737a/msgspec-0.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cfc033c02c3e0aec52b71710d7f84cb3ca5eb407ab2ad23d75631153fdb1f12", size = 210633, upload-time = "2024-12-27T17:39:49.099Z" }, + { url = "https://files.pythonhosted.org/packages/d0/ef/c5422ce8af73928d194a6606f8ae36e93a52fd5e8df5abd366903a5ca8da/msgspec-0.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d911c442571605e17658ca2b416fd8579c5050ac9adc5e00c2cb3126c97f73bc", size = 213594, upload-time = "2024-12-27T17:39:51.204Z" }, + { url = "https://files.pythonhosted.org/packages/19/2b/4137bc2ed45660444842d042be2cf5b18aa06efd2cda107cff18253b9653/msgspec-0.19.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:757b501fa57e24896cf40a831442b19a864f56d253679f34f260dcb002524a6c", size = 214053, upload-time = "2024-12-27T17:39:52.866Z" }, + { url = "https://files.pythonhosted.org/packages/9d/e6/8ad51bdc806aac1dc501e8fe43f759f9ed7284043d722b53323ea421c360/msgspec-0.19.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5f0f65f29b45e2816d8bded36e6b837a4bf5fb60ec4bc3c625fa2c6da4124537", size = 219081, upload-time = "2024-12-27T17:39:55.142Z" }, + { url = "https://files.pythonhosted.org/packages/b1/ef/27dd35a7049c9a4f4211c6cd6a8c9db0a50647546f003a5867827ec45391/msgspec-0.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:067f0de1c33cfa0b6a8206562efdf6be5985b988b53dd244a8e06f993f27c8c0", size = 187467, upload-time = "2024-12-27T17:39:56.531Z" }, + { url = "https://files.pythonhosted.org/packages/3c/cb/2842c312bbe618d8fefc8b9cedce37f773cdc8fa453306546dba2c21fd98/msgspec-0.19.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f12d30dd6266557aaaf0aa0f9580a9a8fbeadfa83699c487713e355ec5f0bd86", size = 190498, upload-time = "2024-12-27T17:40:00.427Z" }, + { url = "https://files.pythonhosted.org/packages/58/95/c40b01b93465e1a5f3b6c7d91b10fb574818163740cc3acbe722d1e0e7e4/msgspec-0.19.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82b2c42c1b9ebc89e822e7e13bbe9d17ede0c23c187469fdd9505afd5a481314", size = 183950, upload-time = "2024-12-27T17:40:04.219Z" }, + { url = "https://files.pythonhosted.org/packages/e8/f0/5b764e066ce9aba4b70d1db8b087ea66098c7c27d59b9dd8a3532774d48f/msgspec-0.19.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19746b50be214a54239aab822964f2ac81e38b0055cca94808359d779338c10e", size = 210647, upload-time = "2024-12-27T17:40:05.606Z" }, + { url = "https://files.pythonhosted.org/packages/9d/87/bc14f49bc95c4cb0dd0a8c56028a67c014ee7e6818ccdce74a4862af259b/msgspec-0.19.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60ef4bdb0ec8e4ad62e5a1f95230c08efb1f64f32e6e8dd2ced685bcc73858b5", size = 213563, upload-time = "2024-12-27T17:40:10.516Z" }, + { url = "https://files.pythonhosted.org/packages/53/2f/2b1c2b056894fbaa975f68f81e3014bb447516a8b010f1bed3fb0e016ed7/msgspec-0.19.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac7f7c377c122b649f7545810c6cd1b47586e3aa3059126ce3516ac7ccc6a6a9", size = 213996, upload-time = "2024-12-27T17:40:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/aa/5a/4cd408d90d1417e8d2ce6a22b98a6853c1b4d7cb7669153e4424d60087f6/msgspec-0.19.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5bc1472223a643f5ffb5bf46ccdede7f9795078194f14edd69e3aab7020d327", size = 219087, upload-time = "2024-12-27T17:40:14.881Z" }, + { url = 
"https://files.pythonhosted.org/packages/23/d8/f15b40611c2d5753d1abb0ca0da0c75348daf1252220e5dda2867bd81062/msgspec-0.19.0-cp313-cp313-win_amd64.whl", hash = "sha256:317050bc0f7739cb30d257ff09152ca309bf5a369854bbf1e57dffc310c1f20f", size = 187432, upload-time = "2024-12-27T17:40:16.256Z" }, ] [[package]] @@ -159,18 +196,18 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/98/df/77698abfac98571e65ffeb0c1fba8ffd692ab8458d617a0eed7d9a8d38f2/outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8", size = 21060 } +sdist = { url = "https://files.pythonhosted.org/packages/98/df/77698abfac98571e65ffeb0c1fba8ffd692ab8458d617a0eed7d9a8d38f2/outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8", size = 21060, upload-time = "2023-10-26T04:26:04.361Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/8b/5ab7257531a5d830fc8000c476e63c935488d74609b50f9384a643ec0a62/outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b", size = 10692 }, + { url = "https://files.pythonhosted.org/packages/55/8b/5ab7257531a5d830fc8000c476e63c935488d74609b50f9384a643ec0a62/outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b", size = 10692, upload-time = "2023-10-26T04:26:02.532Z" }, ] [[package]] name = "packaging" version = "24.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950, upload-time = "2024-11-08T09:47:47.202Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451, upload-time = "2024-11-08T09:47:44.722Z" }, ] [[package]] @@ -182,9 +219,9 @@ dependencies = [ { name = "pygments" }, { name = "tabcompleter" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/69/13/80da03638f62facbee76312ca9ee5941c017b080f2e4c6919fd4e87e16e3/pdbp-1.6.1.tar.gz", hash = "sha256:f4041642952a05df89664e166d5bd379607a0866ddd753c06874f65552bdf40b", size = 25322 } +sdist = { url = "https://files.pythonhosted.org/packages/69/13/80da03638f62facbee76312ca9ee5941c017b080f2e4c6919fd4e87e16e3/pdbp-1.6.1.tar.gz", hash = "sha256:f4041642952a05df89664e166d5bd379607a0866ddd753c06874f65552bdf40b", size = 25322, upload-time = "2024-11-07T15:36:43.062Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/29/93/d56fb9ba5569dc29d8263c72e46d21a2fd38741339ebf03f54cf7561828c/pdbp-1.6.1-py3-none-any.whl", hash = 
"sha256:f10bad2ee044c0e5c168cb0825abfdbdc01c50013e9755df5261b060bdd35c22", size = 21495 }, + { url = "https://files.pythonhosted.org/packages/29/93/d56fb9ba5569dc29d8263c72e46d21a2fd38741339ebf03f54cf7561828c/pdbp-1.6.1-py3-none-any.whl", hash = "sha256:f10bad2ee044c0e5c168cb0825abfdbdc01c50013e9755df5261b060bdd35c22", size = 21495, upload-time = "2024-11-07T15:36:41.061Z" }, ] [[package]] @@ -194,18 +231,18 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "ptyprocess" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450 } +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" }, ] [[package]] name = "pluggy" version = "1.5.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955, upload-time = "2024-04-20T21:34:42.531Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556, upload-time = "2024-04-20T21:34:40.434Z" }, ] [[package]] @@ -215,51 +252,66 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "wcwidth" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a1/e1/bd15cb8ffdcfeeb2bdc215de3c3cffca11408d829e4b8416dcfe71ba8854/prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab", size = 429087 } +sdist = { url = "https://files.pythonhosted.org/packages/a1/e1/bd15cb8ffdcfeeb2bdc215de3c3cffca11408d829e4b8416dcfe71ba8854/prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab", size = 429087, upload-time = "2025-01-20T15:55:35.072Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/e4/ea/d836f008d33151c7a1f62caf3d8dd782e4d15f6a43897f64480c2b8de2ad/prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198", size = 387816 }, + { url = "https://files.pythonhosted.org/packages/e4/ea/d836f008d33151c7a1f62caf3d8dd782e4d15f6a43897f64480c2b8de2ad/prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198", size = 387816, upload-time = "2025-01-20T15:55:29.98Z" }, +] + +[[package]] +name = "psutil" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/80/336820c1ad9286a4ded7e845b2eccfcb27851ab8ac6abece774a6ff4d3de/psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456", size = 497003, upload-time = "2025-02-13T21:54:07.946Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/e6/2d26234410f8b8abdbf891c9da62bee396583f713fb9f3325a4760875d22/psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25", size = 238051, upload-time = "2025-02-13T21:54:12.36Z" }, + { url = "https://files.pythonhosted.org/packages/04/8b/30f930733afe425e3cbfc0e1468a30a18942350c1a8816acfade80c005c4/psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da", size = 239535, upload-time = "2025-02-13T21:54:16.07Z" }, + { url = "https://files.pythonhosted.org/packages/2a/ed/d362e84620dd22876b55389248e522338ed1bf134a5edd3b8231d7207f6d/psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91", size = 275004, upload-time = "2025-02-13T21:54:18.662Z" }, + { url = "https://files.pythonhosted.org/packages/bf/b9/b0eb3f3cbcb734d930fdf839431606844a825b23eaf9a6ab371edac8162c/psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34", size = 277986, upload-time = "2025-02-13T21:54:21.811Z" }, + { url = "https://files.pythonhosted.org/packages/eb/a2/709e0fe2f093556c17fbafda93ac032257242cabcc7ff3369e2cb76a97aa/psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993", size = 279544, upload-time = "2025-02-13T21:54:24.68Z" }, + { url = "https://files.pythonhosted.org/packages/50/e6/eecf58810b9d12e6427369784efe814a1eec0f492084ce8eb8f4d89d6d61/psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99", size = 241053, upload-time = "2025-02-13T21:54:34.31Z" }, + { url = "https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885, upload-time = "2025-02-13T21:54:37.486Z" }, ] [[package]] name = "ptyprocess" version = "0.7.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = 
"sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762 } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993 }, + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, ] [[package]] name = "pycparser" version = "2.22" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736 } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552 }, + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, ] [[package]] name = "pygments" version = "2.19.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581, upload-time = "2025-01-06T17:26:30.443Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293, upload-time = "2025-01-06T17:26:25.553Z" }, ] [[package]] name = "pyperclip" version = "1.9.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/30/23/2f0a3efc4d6a32f3b63cdff36cd398d9701d26cda58e3ab97ac79fb5e60d/pyperclip-1.9.0.tar.gz", hash = "sha256:b7de0142ddc81bfc5c7507eea19da920b92252b548b96186caf94a5e2527d310", size = 20961 } +sdist = { url = "https://files.pythonhosted.org/packages/30/23/2f0a3efc4d6a32f3b63cdff36cd398d9701d26cda58e3ab97ac79fb5e60d/pyperclip-1.9.0.tar.gz", hash = "sha256:b7de0142ddc81bfc5c7507eea19da920b92252b548b96186caf94a5e2527d310", size = 20961, upload-time = "2024-06-18T20:38:48.401Z" } [[package]] name = "pyreadline3" version = "3.5.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0f/49/4cea918a08f02817aabae639e3d0ac046fef9f9180518a3ad394e22da148/pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7", size = 99839 } +sdist = { url = "https://files.pythonhosted.org/packages/0f/49/4cea918a08f02817aabae639e3d0ac046fef9f9180518a3ad394e22da148/pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7", size = 99839, upload-time = "2024-09-19T02:40:10.062Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6", size = 83178 }, + { url = "https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6", size = 83178, upload-time = "2024-09-19T02:40:08.598Z" }, ] [[package]] @@ -272,36 +324,36 @@ dependencies = [ { name = "packaging" }, { name = "pluggy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891 } +sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891, upload-time = "2025-03-02T12:54:54.503Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634 }, + { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634, upload-time = "2025-03-02T12:54:52.069Z" }, ] [[package]] name = "sniffio" version = "1.3.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } wheels = [ - { url 
= "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, ] [[package]] name = "sortedcontainers" version = "2.4.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e8/c4/ba2f8066cceb6f23394729afe52f3bf7adec04bf9ed2c820b39e19299111/sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", size = 30594 } +sdist = { url = "https://files.pythonhosted.org/packages/e8/c4/ba2f8066cceb6f23394729afe52f3bf7adec04bf9ed2c820b39e19299111/sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", size = 30594, upload-time = "2021-05-16T22:03:42.897Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/32/46/9cb0e58b2deb7f82b84065f37f3bffeb12413f947f9388e4cac22c4621ce/sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0", size = 29575 }, + { url = "https://files.pythonhosted.org/packages/32/46/9cb0e58b2deb7f82b84065f37f3bffeb12413f947f9388e4cac22c4621ce/sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0", size = 29575, upload-time = "2021-05-16T22:03:41.177Z" }, ] [[package]] name = "stackscope" version = "0.2.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4a/fc/20dbb993353f31230138f3c63f3f0c881d1853e70d7a30cd68d2ba4cf1e2/stackscope-0.2.2.tar.gz", hash = "sha256:f508c93eb4861ada466dd3ff613ca203962ceb7587ad013759f15394e6a4e619", size = 90479 } +sdist = { url = "https://files.pythonhosted.org/packages/4a/fc/20dbb993353f31230138f3c63f3f0c881d1853e70d7a30cd68d2ba4cf1e2/stackscope-0.2.2.tar.gz", hash = "sha256:f508c93eb4861ada466dd3ff613ca203962ceb7587ad013759f15394e6a4e619", size = 90479, upload-time = "2024-02-27T22:02:15.831Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f1/5f/0a674fcafa03528089badb46419413f342537b5b57d2fefc9900fb8ee4e4/stackscope-0.2.2-py3-none-any.whl", hash = "sha256:c199b0cda738d39c993ee04eb01961b06b7e9aeb43ebf9fd6226cdd72ea9faf6", size = 80807 }, + { url = "https://files.pythonhosted.org/packages/f1/5f/0a674fcafa03528089badb46419413f342537b5b57d2fefc9900fb8ee4e4/stackscope-0.2.2-py3-none-any.whl", hash = "sha256:c199b0cda738d39c993ee04eb01961b06b7e9aeb43ebf9fd6226cdd72ea9faf6", size = 80807, upload-time = "2024-02-27T22:02:13.692Z" }, ] [[package]] @@ -311,9 +363,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pyreadline3", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/73/1a/ed3544579628c5709bae6fae2255e94c6982a9ff77d42d8ba59fd2f3b21a/tabcompleter-1.4.0.tar.gz", hash = "sha256:7562a9938e62f8e7c3be612c3ac4e14c5ec4307b58ba9031c148260e866e8814", size = 10431 } +sdist = { url = "https://files.pythonhosted.org/packages/73/1a/ed3544579628c5709bae6fae2255e94c6982a9ff77d42d8ba59fd2f3b21a/tabcompleter-1.4.0.tar.gz", hash = 
"sha256:7562a9938e62f8e7c3be612c3ac4e14c5ec4307b58ba9031c148260e866e8814", size = 10431, upload-time = "2024-10-28T00:44:52.665Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl", hash = "sha256:d744aa735b49c0a6cc2fb8fcd40077fec47425e4388301010b14e6ce3311368b", size = 6725 }, + { url = "https://files.pythonhosted.org/packages/65/44/bb509c3d2c0b5a87e7a5af1d5917a402a32ff026f777a6d7cb6990746cbb/tabcompleter-1.4.0-py3-none-any.whl", hash = "sha256:d744aa735b49c0a6cc2fb8fcd40077fec47425e4388301010b14e6ce3311368b", size = 6725, upload-time = "2024-10-28T00:44:51.267Z" }, ] [[package]] @@ -321,6 +373,8 @@ name = "tractor" version = "0.1.0a6.dev0" source = { editable = "." } dependencies = [ + { name = "bidict" }, + { name = "cffi" }, { name = "colorlog" }, { name = "msgspec" }, { name = "pdbp" }, @@ -334,14 +388,18 @@ dev = [ { name = "greenback" }, { name = "pexpect" }, { name = "prompt-toolkit" }, + { name = "psutil" }, { name = "pyperclip" }, { name = "pytest" }, { name = "stackscope" }, + { name = "typing-extensions" }, { name = "xonsh" }, ] [package.metadata] requires-dist = [ + { name = "bidict", specifier = ">=0.23.1" }, + { name = "cffi", specifier = ">=1.17.1" }, { name = "colorlog", specifier = ">=6.8.2,<7" }, { name = "msgspec", specifier = ">=0.19.0" }, { name = "pdbp", specifier = ">=1.6,<2" }, @@ -355,9 +413,11 @@ dev = [ { name = "greenback", specifier = ">=1.2.1,<2" }, { name = "pexpect", specifier = ">=4.9.0,<5" }, { name = "prompt-toolkit", specifier = ">=3.0.50" }, + { name = "psutil", specifier = ">=7.0.0" }, { name = "pyperclip", specifier = ">=1.9.0" }, { name = "pytest", specifier = ">=8.3.5" }, { name = "stackscope", specifier = ">=0.2.2,<0.3" }, + { name = "typing-extensions", specifier = ">=4.14.1" }, { name = "xonsh", specifier = ">=0.19.2" }, ] @@ -368,9 +428,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "trio" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f8/8e/fdd7bc467b40eedd0a5f2ed36b0d692c6e6f2473be00c8160e2e9f53adc1/tricycle-0.4.1.tar.gz", hash = "sha256:f56edb4b3e1bed3e2552b1b499b24a2dab47741e92e9b4d806acc5c35c9e6066", size = 41551 } +sdist = { url = "https://files.pythonhosted.org/packages/f8/8e/fdd7bc467b40eedd0a5f2ed36b0d692c6e6f2473be00c8160e2e9f53adc1/tricycle-0.4.1.tar.gz", hash = "sha256:f56edb4b3e1bed3e2552b1b499b24a2dab47741e92e9b4d806acc5c35c9e6066", size = 41551, upload-time = "2024-02-02T20:41:15.298Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d7/c6/7cc05d60e21c683df99167db071ce5d848f5063c2a63971a8443466f603e/tricycle-0.4.1-py3-none-any.whl", hash = "sha256:67900995a73e7445e2c70250cdca04a778d9c3923dd960a97ad4569085e0fb3f", size = 35316 }, + { url = "https://files.pythonhosted.org/packages/d7/c6/7cc05d60e21c683df99167db071ce5d848f5063c2a63971a8443466f603e/tricycle-0.4.1-py3-none-any.whl", hash = "sha256:67900995a73e7445e2c70250cdca04a778d9c3923dd960a97ad4569085e0fb3f", size = 35316, upload-time = "2024-02-02T20:41:14.108Z" }, ] [[package]] @@ -385,82 +445,91 @@ dependencies = [ { name = "sniffio" }, { name = "sortedcontainers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a1/47/f62e62a1a6f37909aed0bf8f5d5411e06fa03846cfcb64540cd1180ccc9f/trio-0.29.0.tar.gz", hash = "sha256:ea0d3967159fc130acb6939a0be0e558e364fee26b5deeecc893a6b08c361bdf", size = 588952 } +sdist = { url = 
"https://files.pythonhosted.org/packages/a1/47/f62e62a1a6f37909aed0bf8f5d5411e06fa03846cfcb64540cd1180ccc9f/trio-0.29.0.tar.gz", hash = "sha256:ea0d3967159fc130acb6939a0be0e558e364fee26b5deeecc893a6b08c361bdf", size = 588952, upload-time = "2025-02-14T07:13:50.724Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c9/55/c4d9bea8b3d7937901958f65124123512419ab0eb73695e5f382521abbfb/trio-0.29.0-py3-none-any.whl", hash = "sha256:d8c463f1a9cc776ff63e331aba44c125f423a5a13c684307e828d930e625ba66", size = 492920 }, + { url = "https://files.pythonhosted.org/packages/c9/55/c4d9bea8b3d7937901958f65124123512419ab0eb73695e5f382521abbfb/trio-0.29.0-py3-none-any.whl", hash = "sha256:d8c463f1a9cc776ff63e331aba44c125f423a5a13c684307e828d930e625ba66", size = 492920, upload-time = "2025-02-14T07:13:48.696Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.14.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/5a/da40306b885cc8c09109dc2e1abd358d5684b1425678151cdaed4731c822/typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36", size = 107673, upload-time = "2025-07-04T13:28:34.16Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/00/d631e67a838026495268c2f6884f3711a15a9a2a96cd244fdaea53b823fb/typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76", size = 43906, upload-time = "2025-07-04T13:28:32.743Z" }, ] [[package]] name = "wcwidth" version = "0.2.13" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301, upload-time = "2024-01-06T02:10:57.829Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166, upload-time = "2024-01-06T02:10:55.763Z" }, ] [[package]] name = "wrapt" version = "1.17.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c3/fc/e91cc220803d7bc4db93fb02facd8461c37364151b8494762cc88b0fbcef/wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3", size = 55531 } +sdist = { url = "https://files.pythonhosted.org/packages/c3/fc/e91cc220803d7bc4db93fb02facd8461c37364151b8494762cc88b0fbcef/wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3", size = 55531, upload-time = "2025-01-14T10:35:45.465Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cd/f7/a2aab2cbc7a665efab072344a8949a71081eed1d2f451f7f7d2b966594a2/wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", 
hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58", size = 53308 }, - { url = "https://files.pythonhosted.org/packages/50/ff/149aba8365fdacef52b31a258c4dc1c57c79759c335eff0b3316a2664a64/wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda", size = 38488 }, - { url = "https://files.pythonhosted.org/packages/65/46/5a917ce85b5c3b490d35c02bf71aedaa9f2f63f2d15d9949cc4ba56e8ba9/wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438", size = 38776 }, - { url = "https://files.pythonhosted.org/packages/ca/74/336c918d2915a4943501c77566db41d1bd6e9f4dbc317f356b9a244dfe83/wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a", size = 83776 }, - { url = "https://files.pythonhosted.org/packages/09/99/c0c844a5ccde0fe5761d4305485297f91d67cf2a1a824c5f282e661ec7ff/wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000", size = 75420 }, - { url = "https://files.pythonhosted.org/packages/b4/b0/9fc566b0fe08b282c850063591a756057c3247b2362b9286429ec5bf1721/wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6", size = 83199 }, - { url = "https://files.pythonhosted.org/packages/9d/4b/71996e62d543b0a0bd95dda485219856def3347e3e9380cc0d6cf10cfb2f/wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b", size = 82307 }, - { url = "https://files.pythonhosted.org/packages/39/35/0282c0d8789c0dc9bcc738911776c762a701f95cfe113fb8f0b40e45c2b9/wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662", size = 75025 }, - { url = "https://files.pythonhosted.org/packages/4f/6d/90c9fd2c3c6fee181feecb620d95105370198b6b98a0770cba090441a828/wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72", size = 81879 }, - { url = "https://files.pythonhosted.org/packages/8f/fa/9fb6e594f2ce03ef03eddbdb5f4f90acb1452221a5351116c7c4708ac865/wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317", size = 36419 }, - { url = "https://files.pythonhosted.org/packages/47/f8/fb1773491a253cbc123c5d5dc15c86041f746ed30416535f2a8df1f4a392/wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3", size = 38773 }, - { url = "https://files.pythonhosted.org/packages/a1/bd/ab55f849fd1f9a58ed7ea47f5559ff09741b25f00c191231f9f059c83949/wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925", size = 53799 }, - { url = "https://files.pythonhosted.org/packages/53/18/75ddc64c3f63988f5a1d7e10fb204ffe5762bc663f8023f18ecaf31a332e/wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392", size = 38821 }, - { url = 
"https://files.pythonhosted.org/packages/48/2a/97928387d6ed1c1ebbfd4efc4133a0633546bec8481a2dd5ec961313a1c7/wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40", size = 38919 }, - { url = "https://files.pythonhosted.org/packages/73/54/3bfe5a1febbbccb7a2f77de47b989c0b85ed3a6a41614b104204a788c20e/wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d", size = 88721 }, - { url = "https://files.pythonhosted.org/packages/25/cb/7262bc1b0300b4b64af50c2720ef958c2c1917525238d661c3e9a2b71b7b/wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b", size = 80899 }, - { url = "https://files.pythonhosted.org/packages/2a/5a/04cde32b07a7431d4ed0553a76fdb7a61270e78c5fd5a603e190ac389f14/wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98", size = 89222 }, - { url = "https://files.pythonhosted.org/packages/09/28/2e45a4f4771fcfb109e244d5dbe54259e970362a311b67a965555ba65026/wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82", size = 86707 }, - { url = "https://files.pythonhosted.org/packages/c6/d2/dcb56bf5f32fcd4bd9aacc77b50a539abdd5b6536872413fd3f428b21bed/wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae", size = 79685 }, - { url = "https://files.pythonhosted.org/packages/80/4e/eb8b353e36711347893f502ce91c770b0b0929f8f0bed2670a6856e667a9/wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9", size = 87567 }, - { url = "https://files.pythonhosted.org/packages/17/27/4fe749a54e7fae6e7146f1c7d914d28ef599dacd4416566c055564080fe2/wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9", size = 36672 }, - { url = "https://files.pythonhosted.org/packages/15/06/1dbf478ea45c03e78a6a8c4be4fdc3c3bddea5c8de8a93bc971415e47f0f/wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991", size = 38865 }, - { url = "https://files.pythonhosted.org/packages/ce/b9/0ffd557a92f3b11d4c5d5e0c5e4ad057bd9eb8586615cdaf901409920b14/wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125", size = 53800 }, - { url = "https://files.pythonhosted.org/packages/c0/ef/8be90a0b7e73c32e550c73cfb2fa09db62234227ece47b0e80a05073b375/wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998", size = 38824 }, - { url = "https://files.pythonhosted.org/packages/36/89/0aae34c10fe524cce30fe5fc433210376bce94cf74d05b0d68344c8ba46e/wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5", size = 38920 }, - { url = "https://files.pythonhosted.org/packages/3b/24/11c4510de906d77e0cfb5197f1b1445d4fec42c9a39ea853d482698ac681/wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8", size = 88690 }, - { url = "https://files.pythonhosted.org/packages/71/d7/cfcf842291267bf455b3e266c0c29dcb675b5540ee8b50ba1699abf3af45/wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6", size = 80861 }, - { url = "https://files.pythonhosted.org/packages/d5/66/5d973e9f3e7370fd686fb47a9af3319418ed925c27d72ce16b791231576d/wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc", size = 89174 }, - { url = "https://files.pythonhosted.org/packages/a7/d3/8e17bb70f6ae25dabc1aaf990f86824e4fd98ee9cadf197054e068500d27/wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2", size = 86721 }, - { url = "https://files.pythonhosted.org/packages/6f/54/f170dfb278fe1c30d0ff864513cff526d624ab8de3254b20abb9cffedc24/wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b", size = 79763 }, - { url = "https://files.pythonhosted.org/packages/4a/98/de07243751f1c4a9b15c76019250210dd3486ce098c3d80d5f729cba029c/wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504", size = 87585 }, - { url = "https://files.pythonhosted.org/packages/f9/f0/13925f4bd6548013038cdeb11ee2cbd4e37c30f8bfd5db9e5a2a370d6e20/wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a", size = 36676 }, - { url = "https://files.pythonhosted.org/packages/bf/ae/743f16ef8c2e3628df3ddfd652b7d4c555d12c84b53f3d8218498f4ade9b/wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845", size = 38871 }, - { url = "https://files.pythonhosted.org/packages/3d/bc/30f903f891a82d402ffb5fda27ec1d621cc97cb74c16fea0b6141f1d4e87/wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192", size = 56312 }, - { url = "https://files.pythonhosted.org/packages/8a/04/c97273eb491b5f1c918857cd26f314b74fc9b29224521f5b83f872253725/wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b", size = 40062 }, - { url = "https://files.pythonhosted.org/packages/4e/ca/3b7afa1eae3a9e7fefe499db9b96813f41828b9fdb016ee836c4c379dadb/wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0", size = 40155 }, - { url = "https://files.pythonhosted.org/packages/89/be/7c1baed43290775cb9030c774bc53c860db140397047cc49aedaf0a15477/wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306", size = 113471 }, - { url = "https://files.pythonhosted.org/packages/32/98/4ed894cf012b6d6aae5f5cc974006bdeb92f0241775addad3f8cd6ab71c8/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb", size = 101208 }, - { url = 
"https://files.pythonhosted.org/packages/ea/fd/0c30f2301ca94e655e5e057012e83284ce8c545df7661a78d8bfca2fac7a/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681", size = 109339 }, - { url = "https://files.pythonhosted.org/packages/75/56/05d000de894c4cfcb84bcd6b1df6214297b8089a7bd324c21a4765e49b14/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6", size = 110232 }, - { url = "https://files.pythonhosted.org/packages/53/f8/c3f6b2cf9b9277fb0813418e1503e68414cd036b3b099c823379c9575e6d/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6", size = 100476 }, - { url = "https://files.pythonhosted.org/packages/a7/b1/0bb11e29aa5139d90b770ebbfa167267b1fc548d2302c30c8f7572851738/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f", size = 106377 }, - { url = "https://files.pythonhosted.org/packages/6a/e1/0122853035b40b3f333bbb25f1939fc1045e21dd518f7f0922b60c156f7c/wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555", size = 37986 }, - { url = "https://files.pythonhosted.org/packages/09/5e/1655cf481e079c1f22d0cabdd4e51733679932718dc23bf2db175f329b76/wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c", size = 40750 }, - { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594 }, + { url = "https://files.pythonhosted.org/packages/cd/f7/a2aab2cbc7a665efab072344a8949a71081eed1d2f451f7f7d2b966594a2/wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58", size = 53308, upload-time = "2025-01-14T10:33:33.992Z" }, + { url = "https://files.pythonhosted.org/packages/50/ff/149aba8365fdacef52b31a258c4dc1c57c79759c335eff0b3316a2664a64/wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda", size = 38488, upload-time = "2025-01-14T10:33:35.264Z" }, + { url = "https://files.pythonhosted.org/packages/65/46/5a917ce85b5c3b490d35c02bf71aedaa9f2f63f2d15d9949cc4ba56e8ba9/wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438", size = 38776, upload-time = "2025-01-14T10:33:38.28Z" }, + { url = "https://files.pythonhosted.org/packages/ca/74/336c918d2915a4943501c77566db41d1bd6e9f4dbc317f356b9a244dfe83/wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a", size = 83776, upload-time = "2025-01-14T10:33:40.678Z" }, + { url = "https://files.pythonhosted.org/packages/09/99/c0c844a5ccde0fe5761d4305485297f91d67cf2a1a824c5f282e661ec7ff/wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000", size = 75420, upload-time = "2025-01-14T10:33:41.868Z" }, + { url = 
"https://files.pythonhosted.org/packages/b4/b0/9fc566b0fe08b282c850063591a756057c3247b2362b9286429ec5bf1721/wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6", size = 83199, upload-time = "2025-01-14T10:33:43.598Z" }, + { url = "https://files.pythonhosted.org/packages/9d/4b/71996e62d543b0a0bd95dda485219856def3347e3e9380cc0d6cf10cfb2f/wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b", size = 82307, upload-time = "2025-01-14T10:33:48.499Z" }, + { url = "https://files.pythonhosted.org/packages/39/35/0282c0d8789c0dc9bcc738911776c762a701f95cfe113fb8f0b40e45c2b9/wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662", size = 75025, upload-time = "2025-01-14T10:33:51.191Z" }, + { url = "https://files.pythonhosted.org/packages/4f/6d/90c9fd2c3c6fee181feecb620d95105370198b6b98a0770cba090441a828/wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72", size = 81879, upload-time = "2025-01-14T10:33:52.328Z" }, + { url = "https://files.pythonhosted.org/packages/8f/fa/9fb6e594f2ce03ef03eddbdb5f4f90acb1452221a5351116c7c4708ac865/wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317", size = 36419, upload-time = "2025-01-14T10:33:53.551Z" }, + { url = "https://files.pythonhosted.org/packages/47/f8/fb1773491a253cbc123c5d5dc15c86041f746ed30416535f2a8df1f4a392/wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3", size = 38773, upload-time = "2025-01-14T10:33:56.323Z" }, + { url = "https://files.pythonhosted.org/packages/a1/bd/ab55f849fd1f9a58ed7ea47f5559ff09741b25f00c191231f9f059c83949/wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925", size = 53799, upload-time = "2025-01-14T10:33:57.4Z" }, + { url = "https://files.pythonhosted.org/packages/53/18/75ddc64c3f63988f5a1d7e10fb204ffe5762bc663f8023f18ecaf31a332e/wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392", size = 38821, upload-time = "2025-01-14T10:33:59.334Z" }, + { url = "https://files.pythonhosted.org/packages/48/2a/97928387d6ed1c1ebbfd4efc4133a0633546bec8481a2dd5ec961313a1c7/wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40", size = 38919, upload-time = "2025-01-14T10:34:04.093Z" }, + { url = "https://files.pythonhosted.org/packages/73/54/3bfe5a1febbbccb7a2f77de47b989c0b85ed3a6a41614b104204a788c20e/wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d", size = 88721, upload-time = "2025-01-14T10:34:07.163Z" }, + { url = "https://files.pythonhosted.org/packages/25/cb/7262bc1b0300b4b64af50c2720ef958c2c1917525238d661c3e9a2b71b7b/wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b", size = 80899, upload-time = "2025-01-14T10:34:09.82Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/5a/04cde32b07a7431d4ed0553a76fdb7a61270e78c5fd5a603e190ac389f14/wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98", size = 89222, upload-time = "2025-01-14T10:34:11.258Z" }, + { url = "https://files.pythonhosted.org/packages/09/28/2e45a4f4771fcfb109e244d5dbe54259e970362a311b67a965555ba65026/wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82", size = 86707, upload-time = "2025-01-14T10:34:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/c6/d2/dcb56bf5f32fcd4bd9aacc77b50a539abdd5b6536872413fd3f428b21bed/wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae", size = 79685, upload-time = "2025-01-14T10:34:15.043Z" }, + { url = "https://files.pythonhosted.org/packages/80/4e/eb8b353e36711347893f502ce91c770b0b0929f8f0bed2670a6856e667a9/wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9", size = 87567, upload-time = "2025-01-14T10:34:16.563Z" }, + { url = "https://files.pythonhosted.org/packages/17/27/4fe749a54e7fae6e7146f1c7d914d28ef599dacd4416566c055564080fe2/wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9", size = 36672, upload-time = "2025-01-14T10:34:17.727Z" }, + { url = "https://files.pythonhosted.org/packages/15/06/1dbf478ea45c03e78a6a8c4be4fdc3c3bddea5c8de8a93bc971415e47f0f/wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991", size = 38865, upload-time = "2025-01-14T10:34:19.577Z" }, + { url = "https://files.pythonhosted.org/packages/ce/b9/0ffd557a92f3b11d4c5d5e0c5e4ad057bd9eb8586615cdaf901409920b14/wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125", size = 53800, upload-time = "2025-01-14T10:34:21.571Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ef/8be90a0b7e73c32e550c73cfb2fa09db62234227ece47b0e80a05073b375/wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998", size = 38824, upload-time = "2025-01-14T10:34:22.999Z" }, + { url = "https://files.pythonhosted.org/packages/36/89/0aae34c10fe524cce30fe5fc433210376bce94cf74d05b0d68344c8ba46e/wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5", size = 38920, upload-time = "2025-01-14T10:34:25.386Z" }, + { url = "https://files.pythonhosted.org/packages/3b/24/11c4510de906d77e0cfb5197f1b1445d4fec42c9a39ea853d482698ac681/wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8", size = 88690, upload-time = "2025-01-14T10:34:28.058Z" }, + { url = "https://files.pythonhosted.org/packages/71/d7/cfcf842291267bf455b3e266c0c29dcb675b5540ee8b50ba1699abf3af45/wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6", size = 80861, upload-time = "2025-01-14T10:34:29.167Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/66/5d973e9f3e7370fd686fb47a9af3319418ed925c27d72ce16b791231576d/wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc", size = 89174, upload-time = "2025-01-14T10:34:31.702Z" }, + { url = "https://files.pythonhosted.org/packages/a7/d3/8e17bb70f6ae25dabc1aaf990f86824e4fd98ee9cadf197054e068500d27/wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2", size = 86721, upload-time = "2025-01-14T10:34:32.91Z" }, + { url = "https://files.pythonhosted.org/packages/6f/54/f170dfb278fe1c30d0ff864513cff526d624ab8de3254b20abb9cffedc24/wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b", size = 79763, upload-time = "2025-01-14T10:34:34.903Z" }, + { url = "https://files.pythonhosted.org/packages/4a/98/de07243751f1c4a9b15c76019250210dd3486ce098c3d80d5f729cba029c/wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504", size = 87585, upload-time = "2025-01-14T10:34:36.13Z" }, + { url = "https://files.pythonhosted.org/packages/f9/f0/13925f4bd6548013038cdeb11ee2cbd4e37c30f8bfd5db9e5a2a370d6e20/wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a", size = 36676, upload-time = "2025-01-14T10:34:37.962Z" }, + { url = "https://files.pythonhosted.org/packages/bf/ae/743f16ef8c2e3628df3ddfd652b7d4c555d12c84b53f3d8218498f4ade9b/wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845", size = 38871, upload-time = "2025-01-14T10:34:39.13Z" }, + { url = "https://files.pythonhosted.org/packages/3d/bc/30f903f891a82d402ffb5fda27ec1d621cc97cb74c16fea0b6141f1d4e87/wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192", size = 56312, upload-time = "2025-01-14T10:34:40.604Z" }, + { url = "https://files.pythonhosted.org/packages/8a/04/c97273eb491b5f1c918857cd26f314b74fc9b29224521f5b83f872253725/wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b", size = 40062, upload-time = "2025-01-14T10:34:45.011Z" }, + { url = "https://files.pythonhosted.org/packages/4e/ca/3b7afa1eae3a9e7fefe499db9b96813f41828b9fdb016ee836c4c379dadb/wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0", size = 40155, upload-time = "2025-01-14T10:34:47.25Z" }, + { url = "https://files.pythonhosted.org/packages/89/be/7c1baed43290775cb9030c774bc53c860db140397047cc49aedaf0a15477/wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306", size = 113471, upload-time = "2025-01-14T10:34:50.934Z" }, + { url = "https://files.pythonhosted.org/packages/32/98/4ed894cf012b6d6aae5f5cc974006bdeb92f0241775addad3f8cd6ab71c8/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb", size = 101208, upload-time = "2025-01-14T10:34:52.297Z" }, + { url = 
"https://files.pythonhosted.org/packages/ea/fd/0c30f2301ca94e655e5e057012e83284ce8c545df7661a78d8bfca2fac7a/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681", size = 109339, upload-time = "2025-01-14T10:34:53.489Z" }, + { url = "https://files.pythonhosted.org/packages/75/56/05d000de894c4cfcb84bcd6b1df6214297b8089a7bd324c21a4765e49b14/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6", size = 110232, upload-time = "2025-01-14T10:34:55.327Z" }, + { url = "https://files.pythonhosted.org/packages/53/f8/c3f6b2cf9b9277fb0813418e1503e68414cd036b3b099c823379c9575e6d/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6", size = 100476, upload-time = "2025-01-14T10:34:58.055Z" }, + { url = "https://files.pythonhosted.org/packages/a7/b1/0bb11e29aa5139d90b770ebbfa167267b1fc548d2302c30c8f7572851738/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f", size = 106377, upload-time = "2025-01-14T10:34:59.3Z" }, + { url = "https://files.pythonhosted.org/packages/6a/e1/0122853035b40b3f333bbb25f1939fc1045e21dd518f7f0922b60c156f7c/wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555", size = 37986, upload-time = "2025-01-14T10:35:00.498Z" }, + { url = "https://files.pythonhosted.org/packages/09/5e/1655cf481e079c1f22d0cabdd4e51733679932718dc23bf2db175f329b76/wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c", size = 40750, upload-time = "2025-01-14T10:35:03.378Z" }, + { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594, upload-time = "2025-01-14T10:35:44.018Z" }, ] [[package]] name = "xonsh" version = "0.19.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/68/4e/56e95a5e607eb3b0da37396f87cde70588efc8ef819ab16f02d5b8378dc4/xonsh-0.19.2.tar.gz", hash = "sha256:cfdd0680d954a2c3aefd6caddcc7143a3d06aa417ed18365a08219bb71b960b0", size = 799960 } +sdist = { url = "https://files.pythonhosted.org/packages/68/4e/56e95a5e607eb3b0da37396f87cde70588efc8ef819ab16f02d5b8378dc4/xonsh-0.19.2.tar.gz", hash = "sha256:cfdd0680d954a2c3aefd6caddcc7143a3d06aa417ed18365a08219bb71b960b0", size = 799960, upload-time = "2025-02-11T17:10:43.563Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6c/13/281094759df87b23b3c02dc4a16603ab08ea54d7f6acfeb69f3341137c7a/xonsh-0.19.2-py310-none-any.whl", hash = "sha256:ec7f163fd3a4943782aa34069d4e72793328c916a5975949dbec8536cbfc089b", size = 642301 }, - { url = "https://files.pythonhosted.org/packages/29/41/a51e4c3918fe9a293b150cb949b1b8c6d45eb17dfed480dcb76ea43df4e7/xonsh-0.19.2-py311-none-any.whl", hash = "sha256:53c45f7a767901f2f518f9b8dd60fc653e0498e56e89825e1710bb0859985049", size = 642286 }, - { url = "https://files.pythonhosted.org/packages/0a/93/9a77b731f492fac27c577dea2afb5a2bcc2a6a1c79be0c86c95498060270/xonsh-0.19.2-py312-none-any.whl", hash = 
"sha256:b24c619aa52b59eae4d35c4195dba9b19a2c548fb5c42c6f85f2b8ccb96807b5", size = 642386 }, - { url = "https://files.pythonhosted.org/packages/be/75/070324769c1ff88d971ce040f4f486339be98e0a365c8dd9991eb654265b/xonsh-0.19.2-py313-none-any.whl", hash = "sha256:c53ef6c19f781fbc399ed1b382b5c2aac2125010679a3b61d643978273c27df0", size = 642873 }, - { url = "https://files.pythonhosted.org/packages/fa/cb/2c7ccec54f5b0e73fdf7650e8336582ff0347d9001c5ef8271dc00c034fe/xonsh-0.19.2-py39-none-any.whl", hash = "sha256:bcc0225dc3847f1ed2f175dac6122fbcc54cea67d9c2dc2753d9615e2a5ff284", size = 634602 }, + { url = "https://files.pythonhosted.org/packages/6c/13/281094759df87b23b3c02dc4a16603ab08ea54d7f6acfeb69f3341137c7a/xonsh-0.19.2-py310-none-any.whl", hash = "sha256:ec7f163fd3a4943782aa34069d4e72793328c916a5975949dbec8536cbfc089b", size = 642301, upload-time = "2025-02-11T17:10:39.244Z" }, + { url = "https://files.pythonhosted.org/packages/29/41/a51e4c3918fe9a293b150cb949b1b8c6d45eb17dfed480dcb76ea43df4e7/xonsh-0.19.2-py311-none-any.whl", hash = "sha256:53c45f7a767901f2f518f9b8dd60fc653e0498e56e89825e1710bb0859985049", size = 642286, upload-time = "2025-02-11T17:10:41.678Z" }, + { url = "https://files.pythonhosted.org/packages/0a/93/9a77b731f492fac27c577dea2afb5a2bcc2a6a1c79be0c86c95498060270/xonsh-0.19.2-py312-none-any.whl", hash = "sha256:b24c619aa52b59eae4d35c4195dba9b19a2c548fb5c42c6f85f2b8ccb96807b5", size = 642386, upload-time = "2025-02-11T17:10:43.688Z" }, + { url = "https://files.pythonhosted.org/packages/be/75/070324769c1ff88d971ce040f4f486339be98e0a365c8dd9991eb654265b/xonsh-0.19.2-py313-none-any.whl", hash = "sha256:c53ef6c19f781fbc399ed1b382b5c2aac2125010679a3b61d643978273c27df0", size = 642873, upload-time = "2025-02-11T17:10:39.297Z" }, + { url = "https://files.pythonhosted.org/packages/fa/cb/2c7ccec54f5b0e73fdf7650e8336582ff0347d9001c5ef8271dc00c034fe/xonsh-0.19.2-py39-none-any.whl", hash = "sha256:bcc0225dc3847f1ed2f175dac6122fbcc54cea67d9c2dc2753d9615e2a5ff284", size = 634602, upload-time = "2025-02-11T17:10:37.004Z" }, ]