2021-12-13 18:08:32 +00:00
|
|
|
# tractor: structured concurrent "actors".
|
|
|
|
|
# Copyright 2018-eternity Tyler Goodlet.
|
|
|
|
|
|
|
|
|
|
# This program is free software: you can redistribute it and/or modify
|
|
|
|
|
# it under the terms of the GNU Affero General Public License as published by
|
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
|
# (at your option) any later version.
|
|
|
|
|
|
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
# GNU Affero General Public License for more details.
|
|
|
|
|
|
|
|
|
|
# You should have received a copy of the GNU Affero General Public License
|
|
|
|
|
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
|
|
|
|
2021-10-04 15:02:51 +00:00
|
|
|
'''
|
|
|
|
|
Async context manager primitives with hard ``trio``-aware semantics
|
|
|
|
|
|
|
|
|
|
'''
|
2024-06-28 23:28:12 +00:00
|
|
|
from __future__ import annotations
|
2026-04-06 04:07:40 +00:00
|
|
|
from collections import defaultdict
|
2024-06-28 23:28:12 +00:00
|
|
|
from contextlib import (
|
|
|
|
|
asynccontextmanager as acm,
|
|
|
|
|
)
|
2022-10-06 18:41:56 +00:00
|
|
|
import inspect
|
2024-06-28 23:28:12 +00:00
|
|
|
from types import ModuleType
|
2021-10-27 18:01:39 +00:00
|
|
|
from typing import (
|
|
|
|
|
Any,
|
|
|
|
|
AsyncContextManager,
|
|
|
|
|
AsyncGenerator,
|
2021-11-28 17:48:26 +00:00
|
|
|
AsyncIterator,
|
2021-12-15 18:42:47 +00:00
|
|
|
Callable,
|
2021-10-27 18:01:39 +00:00
|
|
|
Hashable,
|
|
|
|
|
Sequence,
|
|
|
|
|
TypeVar,
|
2024-06-28 23:28:12 +00:00
|
|
|
TYPE_CHECKING,
|
2021-10-27 18:01:39 +00:00
|
|
|
)
|
2021-10-04 15:02:51 +00:00
|
|
|
|
|
|
|
|
import trio
|
Mv core mods to `runtime/`, `spawn/`, `discovery/` subpkgs
Restructure the flat `tractor/` top-level private mods
into (more nested) subpackages:
- `runtime/`: `_runtime`, `_portal`, `_rpc`, `_state`,
`_supervise`
- `spawn/`: `_spawn`, `_entry`, `_forkserver_override`,
`_mp_fixup_main`
- `discovery/`: `_addr`, `_discovery`, `_multiaddr`
Each subpkg `__init__.py` is kept lazy (no eager
imports) to avoid circular import issues.
Also,
- update all intra-pkg imports across ~35 mods to use
the new subpkg paths (e.g. `from .runtime._state`
instead of `from ._state`)
(this patch was generated in some part by [`claude-code`][claude-code-gh])
[claude-code-gh]: https://github.com/anthropics/claude-code
2026-03-23 22:42:16 +00:00
|
|
|
from tractor.runtime._state import current_actor
|
2024-03-13 22:41:24 +00:00
|
|
|
from tractor.log import get_logger
|
2026-04-06 04:07:40 +00:00
|
|
|
import tractor
|
2025-06-13 03:16:29 +00:00
|
|
|
# from ._beg import collapse_eg
|
2025-07-27 00:10:24 +00:00
|
|
|
# from ._taskc import (
|
|
|
|
|
# maybe_raise_from_masking_exc,
|
|
|
|
|
# )
|
2025-06-13 03:16:29 +00:00
|
|
|
|
2021-10-27 18:01:39 +00:00
|
|
|
|
2024-06-28 23:28:12 +00:00
|
|
|
if TYPE_CHECKING:
    # import-cycle-only dep: `ActorNursery` is needed purely for the
    # `maybe_open_nursery()` type annotation below.
    from tractor import ActorNursery


# module-level logger (no name arg -> tractor's root/pkg logger)
log = get_logger()


# A regular invariant generic type
T = TypeVar("T")
|
|
|
|
|
|
|
|
|
|
|
2022-10-06 18:41:56 +00:00
|
|
|
@acm
async def maybe_open_nursery(
    nursery: trio.Nursery|ActorNursery|None = None,
    shield: bool = False,
    lib: ModuleType = trio,

    **kwargs,  # proxy thru

) -> AsyncGenerator[trio.Nursery, Any]:
    '''
    Deliver the caller-provided `nursery` when given; otherwise open
    (and block on exit of) a fresh one via `lib.open_nursery()`.

    Extra `kwargs` are proxied through to `lib.open_nursery()` in the
    allocating case.

    '''
    if nursery is None:
        async with lib.open_nursery(**kwargs) as new_tn:
            # shielding is only a real-`trio`-nursery feature
            if lib == trio:
                new_tn.cancel_scope.shield = shield
            yield new_tn
    else:
        # caller owns the lifetime; just hand it back
        yield nursery
|
|
|
|
|
|
|
|
|
|
|
2021-10-16 15:34:59 +00:00
|
|
|
async def _enter_and_wait(
|
2021-10-04 15:02:51 +00:00
|
|
|
mngr: AsyncContextManager[T],
|
2021-10-17 05:33:37 +00:00
|
|
|
unwrapped: dict[int, T],
|
2021-10-04 15:02:51 +00:00
|
|
|
all_entered: trio.Event,
|
2021-10-24 17:48:36 +00:00
|
|
|
parent_exit: trio.Event,
|
2023-09-27 18:05:22 +00:00
|
|
|
seed: int,
|
2021-10-24 17:48:36 +00:00
|
|
|
|
2021-10-17 05:33:37 +00:00
|
|
|
) -> None:
|
2021-10-24 17:48:36 +00:00
|
|
|
'''
|
|
|
|
|
Open the async context manager deliver it's value
|
2021-10-04 15:02:51 +00:00
|
|
|
to this task's spawner and sleep until cancelled.
|
|
|
|
|
|
|
|
|
|
'''
|
|
|
|
|
async with mngr as value:
|
2021-10-17 05:33:37 +00:00
|
|
|
unwrapped[id(mngr)] = value
|
2021-10-04 15:02:51 +00:00
|
|
|
|
2023-09-27 18:05:22 +00:00
|
|
|
if all(
|
|
|
|
|
val != seed
|
|
|
|
|
for val in unwrapped.values()
|
|
|
|
|
):
|
2021-10-04 15:02:51 +00:00
|
|
|
all_entered.set()
|
|
|
|
|
|
2021-10-24 17:48:36 +00:00
|
|
|
await parent_exit.wait()
|
2021-10-04 15:02:51 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
@acm
async def gather_contexts(
    mngrs: Sequence[AsyncContextManager[T]],

    # caller can provide their own scope
    tn: trio.Nursery|None = None,

) -> AsyncGenerator[
    tuple[
        T | None,
        ...
    ],
    None,
]:
    '''
    Concurrently enter a sequence of async context managers (`acm`s),
    each scheduled in a separate `trio.Task` and deliver their
    unwrapped `yield`-ed values in the same order once all `@acm`s
    in every task have entered.

    On exit, all `acm`s are subsequently and concurrently exited with
    **no order guarantees**.

    This function is somewhat similar to a batch of non-blocking
    calls to `contextlib.AsyncExitStack.enter_async_context()`
    (inside a loop) *in combo with* a `asyncio.gather()` to get the
    `.__aenter__()`-ed values, except the managers are both
    concurrently entered and exited and *cancellation-just-works™*.

    '''
    # `seed` is a per-call unique placeholder; an entry still equal to
    # it means that manager's task has not yet `.__aenter__()`-ed.
    seed: int = id(mngrs)
    unwrapped: dict[int, T|None] = {}.fromkeys(
        (id(mngr) for mngr in mngrs),
        seed,
    )

    all_entered = trio.Event()
    parent_exit = trio.Event()

    # XXX: ensure greedy sequence of manager instances
    # since a lazy inline generator doesn't seem to work
    # with `async with` syntax.
    mngrs = list(mngrs)

    if not mngrs:
        raise ValueError(
            '`.trionics.gather_contexts()` input mngrs is empty?\n'
            '\n'
            'Did try to use inline generator syntax?\n'
            # NOTE(review): `{mngrs}` below is a *literal* (no f-prefix)
            # so it renders as braces in the message — possibly an
            # intended template-style hint; confirm before "fixing".
            'Check that list({mngrs}) works!\n'
            # 'or sequence-type intead!\n'
            # 'Use a non-lazy iterator or sequence-type intead!\n'
        )

    try:
        async with (
            #
            # ?TODO, does including these (eg-collapsing,
            # taskc-unmasking) improve tb noise-reduction/legibility?
            #
            # collapse_eg(),
            maybe_open_nursery(
                nursery=tn,
            ) as tn,

            # maybe_raise_from_masking_exc(),
        ):
            # one task per manager; each blocks on `parent_exit`
            # after entering so the acm stays alive for the caller.
            for mngr in mngrs:
                tn.start_soon(
                    _enter_and_wait,
                    mngr,
                    unwrapped,
                    all_entered,
                    parent_exit,
                    seed,
                )

            # deliver control to caller once all ctx-managers have
            # started (yielded back to us).
            await all_entered.wait()
            yield tuple(unwrapped.values())
            parent_exit.set()

    finally:
        # XXX NOTE: this is ABSOLUTELY REQUIRED to avoid
        # the following wacky bug:
        # <tractorbugurlhere>
        # (re-setting here covers the caller-raised/cancelled case
        # where the post-`yield` `.set()` above never runs; without it
        # the child tasks would block forever on `parent_exit.wait()`.)
        parent_exit.set()
|
|
|
|
|
|
2021-10-27 18:01:39 +00:00
|
|
|
|
|
|
|
|
# Per actor task caching helpers.
|
|
|
|
|
# Further potential examples of interest:
|
|
|
|
|
# https://gist.github.com/njsmith/cf6fc0a97f53865f2c671659c88c1798#file-cache-py-L8
|
|
|
|
|
|
2021-12-15 13:16:31 +00:00
|
|
|
class _Cache:
    '''
    Globally (actor-process scoped) cached, task access to
    a kept-alive-while-in-use async resource.

    All state is held in *class-level* mutable containers, i.e. one
    shared cache per actor-process; keys are derived per
    `acm_func`-call by `maybe_open_context()`.

    '''
    # optional externally provided nursery for `run_ctx` tasks
    service_tn: trio.Nursery|None = None
    # per-ctx-key mutexes guarding first-allocation races
    locks: dict[Hashable, trio.Lock] = {}
    # per-ctx-key active user (task) counts
    users: defaultdict[
        tuple|Hashable,
        int,
    ] = defaultdict(int)
    # per-ctx-key cached `.__aenter__()`-ed values
    values: dict[Any, Any] = {}
    # per-ctx-key (nursery, teardown-event) pairs
    resources: dict[
        Hashable,
        tuple[trio.Nursery, trio.Event]
    ] = {}
    # nurseries: dict[int, trio.Nursery] = {}
    no_more_users: trio.Event|None = None

    @classmethod
    async def run_ctx(
        cls,
        mng,
        ctx_key: tuple,
        task_status: trio.TaskStatus[T] = trio.TASK_STATUS_IGNORED,

    ) -> None:
        # Task entrypoint: enter `mng`, report its value via
        # `task_status.started()`, then keep it alive until the paired
        # `no_more_users` event (stored in `cls.resources[ctx_key]`)
        # fires; cached value and resource entry are always popped on
        # the way out.
        try:
            async with mng as value:
                _, no_more_users = cls.resources[ctx_key]
                try:
                    cls.values[ctx_key] = value
                    task_status.started(value)
                    await no_more_users.wait()
                finally:
                    # drop the cached value even on cancel/error so no
                    # later task re-uses a dead resource
                    value = cls.values.pop(ctx_key)
        finally:
            # discard nursery ref so it won't be re-used (an error)?
            _rsrcs = cls.resources.pop(ctx_key)
            # NOTE(review): error-level on the normal teardown path —
            # presumably temporary trace logging; confirm intended.
            log.error(
                f'Popping ctx resources\n'
                f'{_rsrcs}\n'
            )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class _UnresolvedCtx:
|
|
|
|
|
'''
|
|
|
|
|
Placeholder for the mabye-value delivered from some `acm_func`,
|
|
|
|
|
once (first) entered by a `maybe_open_context()` task.
|
|
|
|
|
|
|
|
|
|
Enables internal teardown logic conditioned on whether the
|
|
|
|
|
context was actually entered successfully vs. cancelled prior.
|
|
|
|
|
|
|
|
|
|
'''
|
2021-10-27 18:01:39 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
@acm
async def maybe_open_context(
    acm_func: Callable[..., AsyncContextManager[T]],

    # XXX: used as cache key after conversion to tuple
    # and all embedded values must also be hashable
    kwargs: dict|None = None,
    key: Hashable|Callable[..., Hashable] = None,

    # caller can provide their own scope
    tn: trio.Nursery|None = None,

) -> AsyncIterator[tuple[bool, T]]:
    '''
    Maybe open an async-context-manager (acm) if there is not already
    a `_Cache`d version for the provided (input) `key` for *this* actor.

    Yield `(cache_hit, value)` where `cache_hit` is `False` for the
    (first) allocating task and `True` for any task re-using the
    already-entered `acm_func`'s value.

    Params:
    - `acm_func`: factory returning the async-context-manager to
      (maybe) enter and cache.
    - `kwargs`: passed verbatim to `acm_func` and, by default, used
      (as an items-tuple) to derive the cache key.
    - `key`: explicit hashable cache key, or a callable deriving one
      from `kwargs`.
    - `tn`: optional caller-provided nursery in which to schedule the
      keep-alive `_Cache.run_ctx` task; defaults to the current
      actor's service nursery.

    '''
    # XXX fix: avoid the mutable-default-argument pitfall; previously
    # `kwargs: dict = {}` shared one dict across all calls (never
    # mutated here, but fragile wrt any future in-place use).
    if kwargs is None:
        kwargs = {}

    fid: int = id(acm_func)
    if inspect.isfunction(key):
        ctx_key = (
            fid,
            key(**kwargs)
        )
    else:
        ctx_key = (
            fid,
            key or tuple(kwargs.items())
        )

    # yielded output
    # sentinel = object()
    yielded: Any = _UnresolvedCtx
    lock_registered: bool = False

    # Lock resource acquisition around task racing / ``trio``'s
    # scheduler protocol.
    # NOTE: the lock is target context manager func specific in order
    # to allow re-entrant use cases where one `maybe_open_context()`
    # wrapped factory may want to call into another.
    task: trio.Task = trio.lowlevel.current_task()
    lock: trio.StrictFIFOLock|None = _Cache.locks.get(
        # fid
        ctx_key
    )
    if not lock:
        lock = _Cache.locks[
            ctx_key
            # fid
        ] = trio.StrictFIFOLock()
        # lock = _Cache.locks[fid] = trio.Lock()
        header: str = 'Allocated NEW lock for @acm_func,\n'
        lock_registered: bool = True
    else:
        await trio.lowlevel.checkpoint()
        header: str = 'Reusing OLD lock for @acm_func,\n'

    log.debug(
        f'{header}'
        f'Acquiring..\n'
        f'task={task!r}\n'
        f'fid={fid!r}\n'
        f'acm_func={acm_func}\n'
    )
    await lock.acquire()
    log.debug(
        f'Acquir lock..\n'
        f'task={task!r}\n'
        f'fid={fid!r}\n'
        f'acm_func={acm_func}\n'
    )

    # XXX: one singleton nursery per actor and we want to
    # have it not be closed until all consumers have exited (which is
    # currently difficult to implement any other way besides using our
    # pre-allocated runtime instance..)
    if tn:
        # TODO, assert tn is eventual parent of this task!
        task: trio.Task = trio.lowlevel.current_task()
        task_tn: trio.Nursery = task.parent_nursery
        if not tn._cancel_status.encloses(
            task_tn._cancel_status
        ):
            raise RuntimeError(
                f'Mis-nesting of task under provided {tn} !?\n'
                f'Current task is NOT a child(-ish)!!\n'
                f'\n'
                f'task: {task}\n'
                f'task_tn: {task_tn}\n'
            )
        service_tn = tn
    else:
        service_tn: trio.Nursery = current_actor()._service_tn

    # TODO: is there any way to allocate
    # a 'stays-open-till-last-task-finshed nursery?
    # service_tn: trio.Nursery
    # async with maybe_open_nursery(_Cache.service_tn) as service_tn:
    #     _Cache.service_tn = service_tn

    cache_miss_ke: KeyError|None = None
    maybe_taskc: trio.Cancelled|None = None
    try:
        # **critical section** that should prevent other tasks from
        # checking the _Cache until complete otherwise the scheduler
        # may switch and by accident we create more then one resource.
        yielded = _Cache.values[ctx_key]
        # XXX^ should key-err if not-yet-allocated

    except KeyError as _ke:
        # XXX, stay mutexed up to cache-miss yield
        try:
            cache_miss_ke = _ke
            log.debug(
                f'Allocating new @acm-func entry\n'
                f'ctx_key={ctx_key}\n'
                f'acm_func={acm_func}\n'
            )
            # await tractor.pause()
            mngr = acm_func(**kwargs)
            resources = _Cache.resources
            entry: tuple|None = resources.get(ctx_key)
            if entry:
                service_tn, ev = entry
                # XXX, trace this.
                # await tractor.pause(shield=True)
                raise RuntimeError(
                    f'Caching resources ALREADY exist?!\n'
                    f'ctx_key={ctx_key!r}\n'
                    f'acm_func={acm_func}\n'
                    f'task: {task}\n'
                )

            resources[ctx_key] = (service_tn, trio.Event())
            # spawn the keep-alive task; `.start()` returns the
            # `.__aenter__()`-ed value via `task_status.started()`.
            yielded: Any = await service_tn.start(
                _Cache.run_ctx,
                mngr,
                ctx_key,
            )
            _Cache.users[ctx_key] += 1
        finally:
            # XXX, since this runs from an `except` it's a checkpoint
            # which can be `trio.Cancelled`-masked.
            #
            # NOTE, in that case the mutex is never released by the
            # (first and) caching task and **we can't** simply shield
            # bc that will inf-block on the `await
            # no_more_users.wait()`.
            #
            # SO just always unlock!
            lock.release()

        try:
            yield (
                False,  # cache_hit = "no"
                yielded,
            )
        except trio.Cancelled as taskc:
            maybe_taskc = taskc
            log.cancel(
                f'Cancelled from cache-miss entry\n'
                f'\n'
                f'ctx_key: {ctx_key!r}\n'
                f'mngr: {mngr!r}\n'
            )
            # XXX, always unset ke from cancelled context
            # since we never consider it a masked exc case!
            # - bc this can be called directly ty `._rpc._invoke()`?
            #
            if maybe_taskc.__context__ is cache_miss_ke:
                maybe_taskc.__context__ = None

            raise taskc

    else:
        # XXX, cached-entry-path
        _Cache.users[ctx_key] += 1
        log.debug(
            f'Re-using cached resource for user {_Cache.users}\n\n'
            f'{ctx_key!r} -> {type(yielded)}\n'
            # TODO: make this work with values but without
            # `msgspec.Struct` causing frickin crashes on field-type
            # lookups..
            # f'{ctx_key!r} -> {yielded!r}\n'
        )
        lock.release()
        yield (
            True,  # cache_hit = "yes"
            yielded,
        )

    finally:
        if lock.locked():
            stats: trio.LockStatistics = lock.statistics()
            owner: trio.Task|None = stats.owner
            log.error(
                f'Lock never released by last owner={owner!r} !?\n'
                f'{stats}\n'
                f'\n'
                f'task={task!r}\n'
                f'fid={fid!r}\n'
                f'acm_func={acm_func}\n'
            )
            # XXX, trace it.
            # await tractor.pause(shield=True)

        _Cache.users[ctx_key] -= 1

        if yielded is not _UnresolvedCtx:
            # if no more consumers, teardown the client
            if _Cache.users[ctx_key] <= 0:
                log.debug(
                    f'De-allocating @acm-func entry\n'
                    f'ctx_key={ctx_key!r}\n'
                    f'acm_func={acm_func!r}\n'
                )

                # XXX: if we're cancelled we the entry may have never
                # been entered since the nursery task was killed.
                # _, no_more_users = _Cache.resources[ctx_key]
                entry = _Cache.resources.get(ctx_key)
                if entry:
                    _, no_more_users = entry
                    no_more_users.set()

        if lock_registered:
            maybe_lock = _Cache.locks.pop(
                ctx_key,
                None,
            )
            if maybe_lock is None:
                log.error(
                    f'Resource lock for {ctx_key} ALREADY POPPED?'
                )
|