ComfyUI/comfy/execution_context.py
doctorpangloss a38968f098 Improvements to execution
- Validation errors that occur early in the lifecycle of prompt
   execution now get propagated to their callers in the
   EmbeddedComfyClient. This includes error messages about missing node
   classes.
 - The execution context now includes the node_id and the prompt_id
 - Latent previews are now sent with a node_id. This is not backwards
   compatible with old frontends.
 - Dependency execution errors are now modeled correctly.
 - Distributed progress encodes image previews with node and prompt IDs.
 - Typing for models
 - The frontend was updated to use node IDs with previews
 - Improvements to torch.compile experiments
 - Some controlnet_aux nodes were upstreamed
2024-10-10 19:30:18 -07:00

44 lines
1.1 KiB
Python

from __future__ import annotations
from contextlib import contextmanager
from contextvars import ContextVar
from typing import NamedTuple, Optional
from .component_model.executor_types import ExecutorToClientProgress
from .distributed.server_stub import ServerStub
# Context-local slot holding the active ExecutionContext; deliberately created
# without a default so an unset state raises LookupError (handled by
# current_execution_context below).
_current_context: ContextVar[ExecutionContext] = ContextVar("comfyui_execution_context")
class ExecutionContext(NamedTuple):
    """Immutable snapshot of what is currently executing.

    Carried in a ContextVar so any code running inside a prompt/node can
    discover its server channel and identifiers without explicit plumbing.
    """
    # Channel for progress/preview messages back to the client; a ServerStub
    # no-op implementation is used when no real server is attached.
    server: ExecutorToClientProgress
    # ID of the node currently executing, or None outside node execution.
    node_id: Optional[str] = None
    # ID of the running task; context_execute_node stores the prompt_id here.
    task_id: Optional[str] = None
# Fallback returned when no context has been installed: progress messages go
# to a no-op ServerStub and both IDs are None.
_empty_execution_context: ExecutionContext = ExecutionContext(ServerStub())
def current_execution_context() -> ExecutionContext:
    """Return the active ExecutionContext, or the empty stub context when
    none has been installed in this context."""
    # ContextVar.get with a default replaces the explicit LookupError handler.
    return _current_context.get(_empty_execution_context)
@contextmanager
def new_execution_context(ctx: ExecutionContext):
    """Install ``ctx`` as the current execution context for the duration of
    the ``with`` block, then restore whatever was active before."""
    reset_token = _current_context.set(ctx)
    try:
        yield
    finally:
        # Restore the previous context even if the body raised.
        _current_context.reset(reset_token)
@contextmanager
def context_execute_node(node_id: str, prompt_id: str):
    """Scope the execution context to a specific node and prompt.

    Derives a new context from the current one, keeping the same server
    channel; the prompt_id is carried in the context's task_id field.
    """
    parent = current_execution_context()
    scoped = parent._replace(node_id=node_id, task_id=prompt_id)
    with new_execution_context(scoped):
        yield