From fb1feed1a2cc5c5d3121a98b840f27fb20a0a58e Mon Sep 17 00:00:00 2001 From: doctorpangloss <@hiddenswitch.com> Date: Fri, 16 Aug 2024 14:30:27 -0700 Subject: [PATCH] Move commit registration --- comfy/caching.py | 305 ----------- comfy/cmd/execution.py | 1111 ---------------------------------------- comfy/graph.py | 234 --------- comfy/graph_utils.py | 143 ------ 4 files changed, 1793 deletions(-) delete mode 100644 comfy/caching.py delete mode 100644 comfy/cmd/execution.py delete mode 100644 comfy/graph.py delete mode 100644 comfy/graph_utils.py diff --git a/comfy/caching.py b/comfy/caching.py deleted file mode 100644 index c8ac1b58d..000000000 --- a/comfy/caching.py +++ /dev/null @@ -1,305 +0,0 @@ -import itertools -from typing import Sequence, Mapping - -from .cmd.execution import nodes -from .graph import DynamicPrompt -from .graph_utils import is_link - - -class CacheKeySet: - def __init__(self, dynprompt, node_ids, is_changed_cache): - self.keys = {} - self.subcache_keys = {} - - def add_keys(self, node_ids): - raise NotImplementedError() - - def all_node_ids(self): - return set(self.keys.keys()) - - def get_used_keys(self): - return self.keys.values() - - def get_used_subcache_keys(self): - return self.subcache_keys.values() - - def get_data_key(self, node_id): - return self.keys.get(node_id, None) - - def get_subcache_key(self, node_id): - return self.subcache_keys.get(node_id, None) - - -class Unhashable: - def __init__(self): - self.value = float("NaN") - - -def to_hashable(obj): - # So that we don't infinitely recurse since frozenset and tuples - # are Sequences. - if isinstance(obj, (int, float, str, bool, type(None))): - return obj - elif isinstance(obj, Mapping): - return frozenset([(to_hashable(k), to_hashable(v)) for k, v in sorted(obj.items())]) - elif isinstance(obj, Sequence): - return frozenset(zip(itertools.count(), [to_hashable(i) for i in obj])) - else: - # TODO - Support other objects like tensors? 
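-        # One possible approach (an assumption, not implemented anywhere in this
-        # codebase) would be to hash a stable byte view of the tensor, e.g.:
-        #     import hashlib
-        #     if hasattr(obj, "numpy"):
-        #         return hashlib.sha256(obj.numpy().tobytes()).hexdigest()
-        # Until something like that exists, each call below returns a fresh
-        # Unhashable instance that never compares equal to another instance, so
-        # the enclosing cache key can never match and the node is re-executed.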
- return Unhashable() - - -class CacheKeySetID(CacheKeySet): - def __init__(self, dynprompt, node_ids, is_changed_cache): - super().__init__(dynprompt, node_ids, is_changed_cache) - self.dynprompt = dynprompt - self.add_keys(node_ids) - - def add_keys(self, node_ids): - for node_id in node_ids: - if node_id in self.keys: - continue - node = self.dynprompt.get_node(node_id) - self.keys[node_id] = (node_id, node["class_type"]) - self.subcache_keys[node_id] = (node_id, node["class_type"]) - - -class CacheKeySetInputSignature(CacheKeySet): - def __init__(self, dynprompt, node_ids, is_changed_cache): - super().__init__(dynprompt, node_ids, is_changed_cache) - self.dynprompt = dynprompt - self.is_changed_cache = is_changed_cache - self.add_keys(node_ids) - - def include_node_id_in_input(self) -> bool: - return False - - def add_keys(self, node_ids): - for node_id in node_ids: - if node_id in self.keys: - continue - node = self.dynprompt.get_node(node_id) - self.keys[node_id] = self.get_node_signature(self.dynprompt, node_id) - self.subcache_keys[node_id] = (node_id, node["class_type"]) - - def get_node_signature(self, dynprompt, node_id): - signature = [] - ancestors, order_mapping = self.get_ordered_ancestry(dynprompt, node_id) - signature.append(self.get_immediate_node_signature(dynprompt, node_id, order_mapping)) - for ancestor_id in ancestors: - signature.append(self.get_immediate_node_signature(dynprompt, ancestor_id, order_mapping)) - return to_hashable(signature) - - def get_immediate_node_signature(self, dynprompt, node_id, ancestor_order_mapping): - node = dynprompt.get_node(node_id) - class_type = node["class_type"] - class_def = nodes.NODE_CLASS_MAPPINGS[class_type] - signature = [class_type, self.is_changed_cache.get(node_id)] - if self.include_node_id_in_input() or (hasattr(class_def, "NOT_IDEMPOTENT") and class_def.NOT_IDEMPOTENT): - signature.append(node_id) - inputs = node["inputs"] - for key in sorted(inputs.keys()): - if is_link(inputs[key]): - (ancestor_id, ancestor_socket) = inputs[key] - ancestor_index = ancestor_order_mapping[ancestor_id] - signature.append((key, ("ANCESTOR", ancestor_index, ancestor_socket))) - else: - signature.append((key, inputs[key])) - return signature - - # This function returns a list of all ancestors of the given node. The order of the list is - # deterministic based on which specific inputs the ancestor is connected by. 
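-    # For example, if node "5" has inputs {"a": ["3", 0], "b": ["2", 1]} and node "3"
-    # has input {"x": ["2", 0]}, the walk visits key "a" first (input keys are sorted),
-    # giving ancestors == ["3", "2"] and order_mapping == {"3": 0, "2": 1}; the second
-    # link to "2" via key "b" is skipped because "2" already has an index.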
- def get_ordered_ancestry(self, dynprompt, node_id): - ancestors = [] - order_mapping = {} - self.get_ordered_ancestry_internal(dynprompt, node_id, ancestors, order_mapping) - return ancestors, order_mapping - - def get_ordered_ancestry_internal(self, dynprompt, node_id, ancestors, order_mapping): - inputs = dynprompt.get_node(node_id)["inputs"] - input_keys = sorted(inputs.keys()) - for key in input_keys: - if is_link(inputs[key]): - ancestor_id = inputs[key][0] - if ancestor_id not in order_mapping: - ancestors.append(ancestor_id) - order_mapping[ancestor_id] = len(ancestors) - 1 - self.get_ordered_ancestry_internal(dynprompt, ancestor_id, ancestors, order_mapping) - - -class BasicCache: - def __init__(self, key_class): - self.key_class = key_class - self.initialized = False - self.dynprompt: DynamicPrompt - self.cache_key_set: CacheKeySet - self.cache = {} - self.subcaches = {} - - def set_prompt(self, dynprompt, node_ids, is_changed_cache): - self.dynprompt = dynprompt - self.cache_key_set = self.key_class(dynprompt, node_ids, is_changed_cache) - self.is_changed_cache = is_changed_cache - self.initialized = True - - def all_node_ids(self): - assert self.initialized - node_ids = self.cache_key_set.all_node_ids() - for subcache in self.subcaches.values(): - node_ids = node_ids.union(subcache.all_node_ids()) - return node_ids - - def _clean_cache(self): - preserve_keys = set(self.cache_key_set.get_used_keys()) - to_remove = [] - for key in self.cache: - if key not in preserve_keys: - to_remove.append(key) - for key in to_remove: - del self.cache[key] - - def _clean_subcaches(self): - preserve_subcaches = set(self.cache_key_set.get_used_subcache_keys()) - - to_remove = [] - for key in self.subcaches: - if key not in preserve_subcaches: - to_remove.append(key) - for key in to_remove: - del self.subcaches[key] - - def clean_unused(self): - assert self.initialized - self._clean_cache() - self._clean_subcaches() - - def _set_immediate(self, node_id, value): - assert self.initialized - cache_key = self.cache_key_set.get_data_key(node_id) - self.cache[cache_key] = value - - def _get_immediate(self, node_id): - if not self.initialized: - return None - cache_key = self.cache_key_set.get_data_key(node_id) - if cache_key in self.cache: - return self.cache[cache_key] - else: - return None - - def _ensure_subcache(self, node_id, children_ids): - subcache_key = self.cache_key_set.get_subcache_key(node_id) - subcache = self.subcaches.get(subcache_key, None) - if subcache is None: - subcache = BasicCache(self.key_class) - self.subcaches[subcache_key] = subcache - subcache.set_prompt(self.dynprompt, children_ids, self.is_changed_cache) - return subcache - - def _get_subcache(self, node_id): - assert self.initialized - subcache_key = self.cache_key_set.get_subcache_key(node_id) - if subcache_key in self.subcaches: - return self.subcaches[subcache_key] - else: - return None - - def recursive_debug_dump(self): - result = [] - for key in self.cache: - result.append({"key": key, "value": self.cache[key]}) - for key in self.subcaches: - result.append({"subcache_key": key, "subcache": self.subcaches[key].recursive_debug_dump()}) - return result - - -class HierarchicalCache(BasicCache): - def __init__(self, key_class): - super().__init__(key_class) - - def _get_cache_for(self, node_id): - assert self.dynprompt is not None - parent_id = self.dynprompt.get_parent_node_id(node_id) - if parent_id is None: - return self - - hierarchy = [] - while parent_id is not None: - hierarchy.append(parent_id) - parent_id = 
self.dynprompt.get_parent_node_id(parent_id) - - cache = self - for parent_id in reversed(hierarchy): - cache = cache._get_subcache(parent_id) - if cache is None: - return None - return cache - - def get(self, node_id): - cache = self._get_cache_for(node_id) - if cache is None: - return None - return cache._get_immediate(node_id) - - def set(self, node_id, value): - cache = self._get_cache_for(node_id) - assert cache is not None - cache._set_immediate(node_id, value) - - def ensure_subcache_for(self, node_id, children_ids): - cache = self._get_cache_for(node_id) - assert cache is not None - return cache._ensure_subcache(node_id, children_ids) - - -class LRUCache(BasicCache): - def __init__(self, key_class, max_size=100): - super().__init__(key_class) - self.max_size = max_size - self.min_generation = 0 - self.generation = 0 - self.used_generation = {} - self.children = {} - - def set_prompt(self, dynprompt, node_ids, is_changed_cache): - super().set_prompt(dynprompt, node_ids, is_changed_cache) - self.generation += 1 - for node_id in node_ids: - self._mark_used(node_id) - - def clean_unused(self): - while len(self.cache) > self.max_size and self.min_generation < self.generation: - self.min_generation += 1 - to_remove = [key for key in self.cache if self.used_generation[key] < self.min_generation] - for key in to_remove: - del self.cache[key] - del self.used_generation[key] - if key in self.children: - del self.children[key] - self._clean_subcaches() - - def get(self, node_id): - self._mark_used(node_id) - return self._get_immediate(node_id) - - def _mark_used(self, node_id): - cache_key = self.cache_key_set.get_data_key(node_id) - if cache_key is not None: - self.used_generation[cache_key] = self.generation - - def set(self, node_id, value): - self._mark_used(node_id) - return self._set_immediate(node_id, value) - - def ensure_subcache_for(self, node_id, children_ids): - # Just uses subcaches for tracking 'live' nodes - super()._ensure_subcache(node_id, children_ids) - - self.cache_key_set.add_keys(children_ids) - self._mark_used(node_id) - cache_key = self.cache_key_set.get_data_key(node_id) - self.children[cache_key] = [] - for child_id in children_ids: - self._mark_used(child_id) - self.children[cache_key].append(self.cache_key_set.get_data_key(child_id)) - return self diff --git a/comfy/cmd/execution.py b/comfy/cmd/execution.py deleted file mode 100644 index 70af064dd..000000000 --- a/comfy/cmd/execution.py +++ /dev/null @@ -1,1111 +0,0 @@ -from __future__ import annotations - -import copy -import heapq -import inspect -import logging -import sys -import threading -import time -import traceback -import typing -from os import PathLike -from typing import List, Optional, Tuple - -import lazy_object_proxy -import torch -from opentelemetry.trace import get_current_span, StatusCode, Status - -from .main_pre import tracer -from .. import interruption -from .. 
import model_management -from ..component_model.abstract_prompt_queue import AbstractPromptQueue -from ..component_model.executor_types import ExecutorToClientProgress, ValidationTuple, ValidateInputsTuple, \ - ValidationErrorDict, NodeErrorsDictValue, ValidationErrorExtraInfoDict, FormattedValue, RecursiveExecutionTuple, \ - RecursiveExecutionErrorDetails, RecursiveExecutionErrorDetailsInterrupted, ExecutionResult, DuplicateNodeError, \ - HistoryResultDict -from ..component_model.files import canonicalize_path -from ..component_model.queue_types import QueueTuple, HistoryEntry, QueueItem, MAXIMUM_HISTORY_SIZE, ExecutionStatus -from ..execution_context import new_execution_context, ExecutionContext -from ..nodes.package import import_all_nodes_in_workspace -from ..nodes.package_typing import ExportedNodes, InputTypeSpec, FloatSpecOptions, IntSpecOptions - -# ideally this would be passed in from main, but the way this is authored, we can't easily pass nodes down to the -# various functions that are declared here. It should have been a context in the first place. -nodes: ExportedNodes = lazy_object_proxy.Proxy(import_all_nodes_in_workspace) - -# order matters -from ..graph import get_input_info, ExecutionList, DynamicPrompt, ExecutionBlocker -from ..graph_utils import is_link, GraphBuilder -from ..caching import HierarchicalCache, LRUCache, CacheKeySetInputSignature, CacheKeySetID - - -class IsChangedCache: - def __init__(self, dynprompt, outputs_cache): - self.dynprompt = dynprompt - self.outputs_cache = outputs_cache - self.is_changed = {} - - def get(self, node_id): - if node_id in self.is_changed: - return self.is_changed[node_id] - - node = self.dynprompt.get_node(node_id) - class_type = node["class_type"] - class_def = nodes.NODE_CLASS_MAPPINGS[class_type] - if not hasattr(class_def, "IS_CHANGED"): - self.is_changed[node_id] = False - return self.is_changed[node_id] - - if "is_changed" in node: - self.is_changed[node_id] = node["is_changed"] - return self.is_changed[node_id] - - input_data_all, _ = get_input_data(node["inputs"], class_def, node_id, self.outputs_cache) - try: - is_changed = map_node_over_list(class_def, input_data_all, "IS_CHANGED") - node["is_changed"] = [None if isinstance(x, ExecutionBlocker) else x for x in is_changed] - except: - node["is_changed"] = float("NaN") - finally: - self.is_changed[node_id] = node["is_changed"] - return self.is_changed[node_id] - - -class CacheSet: - def __init__(self, lru_size=None): - if lru_size is None or lru_size == 0: - self.init_classic_cache() - else: - self.init_lru_cache(lru_size) - self.all = [self.outputs, self.ui, self.objects] - - # Useful for those with ample RAM/VRAM -- allows experimenting without - # blowing away the cache every time - def init_lru_cache(self, cache_size): - self.outputs = LRUCache(CacheKeySetInputSignature, max_size=cache_size) - self.ui = LRUCache(CacheKeySetInputSignature, max_size=cache_size) - self.objects = HierarchicalCache(CacheKeySetID) - - # Performs like the old cache -- dump data ASAP - def init_classic_cache(self): - self.outputs = HierarchicalCache(CacheKeySetInputSignature) - self.ui = HierarchicalCache(CacheKeySetInputSignature) - self.objects = HierarchicalCache(CacheKeySetID) - - def recursive_debug_dump(self): - result = { - "outputs": self.outputs.recursive_debug_dump(), - "ui": self.ui.recursive_debug_dump(), - } - return result - - -def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data=None): - if extra_data is None: - extra_data = {} - if outputs 
is None: - outputs = {} - valid_inputs = class_def.INPUT_TYPES() - input_data_all = {} - missing_keys = {} - for x in inputs: - input_data = inputs[x] - input_type, input_category, input_info = get_input_info(class_def, x) - - def mark_missing(): - missing_keys[x] = True - input_data_all[x] = (None,) - - if is_link(input_data) and (not input_info or not input_info.get("rawLink", False)): - input_unique_id = input_data[0] - output_index = input_data[1] - if outputs is None: - mark_missing() - continue # This might be a lazily-evaluated input - cached_output = outputs.get(input_unique_id) - if cached_output is None: - mark_missing() - continue - if output_index >= len(cached_output): - mark_missing() - continue - obj = cached_output[output_index] - input_data_all[x] = obj - elif input_category is not None: - input_data_all[x] = [input_data] - - # todo: this should be retrieved from the execution context - if "hidden" in valid_inputs: - h = valid_inputs["hidden"] - for x in h: - if h[x] == "PROMPT": - input_data_all[x] = [dynprompt.get_original_prompt() if dynprompt is not None else {}] - if h[x] == "DYNPROMPT": - input_data_all[x] = [dynprompt] - if h[x] == "EXTRA_PNGINFO": - input_data_all[x] = [extra_data.get('extra_pnginfo', None)] - if h[x] == "UNIQUE_ID": - input_data_all[x] = [unique_id] - return input_data_all, missing_keys - - -@tracer.start_as_current_span("Execute Node") -def map_node_over_list(obj, input_data_all: typing.Dict[str, typing.Any], func: str, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None): - span = get_current_span() - class_type = obj.__class__.__name__ - span.set_attribute("class_type", class_type) - if input_data_all is not None: - for kwarg_name, kwarg_value in input_data_all.items(): - if isinstance(kwarg_value, str) or isinstance(kwarg_value, bool) or isinstance(kwarg_value, int) or isinstance(kwarg_value, float): - span.set_attribute(f"input_data_all.{kwarg_name}", kwarg_value) - else: - try: - items_to_display = [] - if hasattr(kwarg_value, "shape"): - # if the object has a shape attribute (likely a NumPy array or similar), get up to the first ten elements - flat_values = kwarg_value.flatten() if hasattr(kwarg_value, "flatten") else kwarg_value - items_to_display = [flat_values[i] for i in range(min(10, flat_values.size))] - elif hasattr(kwarg_value, "__getitem__") and hasattr(kwarg_value, "__len__"): - # If the object is indexable and has a length, get the first ten items - items_to_display = [kwarg_value[i] for i in range(min(10, len(kwarg_value)))] - - filtered_items = [ - item for item in items_to_display if isinstance(item, (str, bool, int, float)) - ] - - if filtered_items: - span.set_attribute(f"input_data_all.{kwarg_name}", filtered_items) - except TypeError: - pass - # check if node wants the lists - input_is_list = getattr(obj, "INPUT_IS_LIST", False) - - if len(input_data_all) == 0: - max_len_input = 0 - else: - max_len_input = max(len(x) for x in input_data_all.values()) - - # get a slice of inputs, repeat last input when list isn't long enough - def slice_dict(d, i): - return {k: v[i if len(v) > i else -1] for k, v in d.items()} - - results = [] - - def process_inputs(inputs, index=None): - if allow_interrupt: - interruption.throw_exception_if_processing_interrupted() - execution_block = None - for k, v in inputs.items(): - if isinstance(v, ExecutionBlocker): - execution_block = execution_block_cb(v) if execution_block_cb else v - break - if execution_block is None: - if pre_execute_cb is not None and index is not None: - 
pre_execute_cb(index) - results.append(getattr(obj, func)(**inputs)) - else: - results.append(execution_block) - - if input_is_list: - process_inputs(input_data_all, 0) - elif max_len_input == 0: - process_inputs({}) - else: - for i in range(max_len_input): - input_dict = slice_dict(input_data_all, i) - process_inputs(input_dict, i) - return results - - -def merge_result_data(results, obj): - # check which outputs need concatenating - output = [] - output_is_list = [False] * len(results[0]) - if hasattr(obj, "OUTPUT_IS_LIST"): - output_is_list = obj.OUTPUT_IS_LIST - - # merge node execution results - for i, is_list in zip(range(len(results[0])), output_is_list): - if is_list: - output.append([x for o in results for x in o[i]]) - else: - output.append([o[i] for o in results]) - return output - - -def get_output_data(obj, input_data_all, execution_block_cb=None, pre_execute_cb=None): - results = [] - uis = [] - subgraph_results = [] - return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) - has_subgraph = False - for i in range(len(return_values)): - r = return_values[i] - if isinstance(r, dict): - if 'ui' in r: - uis.append(r['ui']) - if 'expand' in r: - # Perform an expansion, but do not append results - has_subgraph = True - new_graph = r['expand'] - result = r.get("result", None) - if isinstance(result, ExecutionBlocker): - result = tuple([result] * len(obj.RETURN_TYPES)) - subgraph_results.append((new_graph, result)) - elif 'result' in r: - result = r.get("result", None) - if isinstance(result, ExecutionBlocker): - result = tuple([result] * len(obj.RETURN_TYPES)) - results.append(result) - subgraph_results.append((None, result)) - else: - if isinstance(r, ExecutionBlocker): - r = tuple([r] * len(obj.RETURN_TYPES)) - results.append(r) - subgraph_results.append((None, r)) - - if has_subgraph: - output = subgraph_results - elif len(results) > 0: - output = merge_result_data(results, obj) - else: - output = [] - ui = dict() - if len(uis) > 0: - ui = {k: [y for x in uis for y in x[k]] for k in uis[0].keys()} - return output, ui, has_subgraph - - -def format_value(x) -> FormattedValue: - if x is None: - return None - elif isinstance(x, (int, float, bool, str)): - return x - else: - return str(x) - - -def execute(server, dynprompt, caches, current_item, extra_data, executed, prompt_id, execution_list, pending_subgraph_results): - unique_id = current_item - real_node_id = dynprompt.get_real_node_id(unique_id) - display_node_id = dynprompt.get_display_node_id(unique_id) - parent_node_id = dynprompt.get_parent_node_id(unique_id) - inputs = dynprompt.get_node(unique_id)['inputs'] - class_type = dynprompt.get_node(unique_id)['class_type'] - class_def = nodes.NODE_CLASS_MAPPINGS[class_type] - if caches.outputs.get(unique_id) is not None: - if server.client_id is not None: - cached_output = caches.ui.get(unique_id) or {} - server.send_sync("executed", {"node": unique_id, "display_node": display_node_id, "output": cached_output.get("output", None), "prompt_id": prompt_id}, server.client_id) - return RecursiveExecutionTuple(ExecutionResult.SUCCESS, None, None) - - input_data_all = None - try: - if unique_id in pending_subgraph_results: - cached_results = pending_subgraph_results[unique_id] - resolved_outputs = [] - for is_subgraph, result in cached_results: - if not is_subgraph: - resolved_outputs.append(result) - else: - resolved_output = [] - for r in result: - if is_link(r): - source_node, 
source_output = r[0], r[1] - node_output = caches.outputs.get(source_node)[source_output] - for o in node_output: - resolved_output.append(o) - - else: - resolved_output.append(r) - resolved_outputs.append(tuple(resolved_output)) - output_data = merge_result_data(resolved_outputs, class_def) - output_ui = [] - has_subgraph = False - else: - input_data_all, missing_keys = get_input_data(inputs, class_def, unique_id, caches.outputs, dynprompt, extra_data) - if server.client_id is not None: - server.last_node_id = display_node_id - server.send_sync("executing", {"node": unique_id, "display_node": display_node_id, "prompt_id": prompt_id}, server.client_id) - - obj = caches.objects.get(unique_id) - if obj is None: - obj = class_def() - caches.objects.set(unique_id, obj) - - if hasattr(obj, "check_lazy_status"): - required_inputs = map_node_over_list(obj, input_data_all, "check_lazy_status", allow_interrupt=True) - required_inputs = set(sum([r for r in required_inputs if isinstance(r, list)], [])) - required_inputs = [x for x in required_inputs if isinstance(x, str) and ( - x not in input_data_all or x in missing_keys - )] - if len(required_inputs) > 0: - for i in required_inputs: - execution_list.make_input_strong_link(unique_id, i) - return (ExecutionResult.PENDING, None, None) - - def execution_block_cb(block): - if block.message is not None: - mes = { - "prompt_id": prompt_id, - "node_id": unique_id, - "node_type": class_type, - "executed": list(executed), - - "exception_message": f"Execution Blocked: {block.message}", - "exception_type": "ExecutionBlocked", - "traceback": [], - "current_inputs": [], - "current_outputs": [], - } - server.send_sync("execution_error", mes, server.client_id) - return ExecutionBlocker(None) - else: - return block - - def pre_execute_cb(call_index): - GraphBuilder.set_default_prefix(unique_id, call_index, 0) - - output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) - if len(output_ui) > 0: - caches.ui.set(unique_id, { - "meta": { - "node_id": unique_id, - "display_node": display_node_id, - "parent_node": parent_node_id, - "real_node_id": real_node_id, - }, - "output": output_ui - }) - if server.client_id is not None: - server.send_sync("executed", {"node": unique_id, "display_node": display_node_id, "output": output_ui, "prompt_id": prompt_id}, - server.client_id) - if has_subgraph: - cached_outputs = [] - new_node_ids = [] - new_output_ids = [] - new_output_links = [] - for i in range(len(output_data)): - new_graph, node_outputs = output_data[i] - if new_graph is None: - cached_outputs.append((False, node_outputs)) - else: - # Check for conflicts - for node_id in new_graph.keys(): - if dynprompt.has_node(node_id): - raise DuplicateNodeError(f"Attempt to add duplicate node {node_id}. 
Ensure node ids are unique and deterministic or use graph_utils.GraphBuilder.") - for node_id, node_info in new_graph.items(): - new_node_ids.append(node_id) - display_id = node_info.get("override_display_id", unique_id) - dynprompt.add_ephemeral_node(node_id, node_info, unique_id, display_id) - # Figure out if the newly created node is an output node - class_type = node_info["class_type"] - class_def = nodes.NODE_CLASS_MAPPINGS[class_type] - if hasattr(class_def, 'OUTPUT_NODE') and class_def.OUTPUT_NODE == True: - new_output_ids.append(node_id) - for i in range(len(node_outputs)): - if is_link(node_outputs[i]): - from_node_id, from_socket = node_outputs[i][0], node_outputs[i][1] - new_output_links.append((from_node_id, from_socket)) - cached_outputs.append((True, node_outputs)) - new_node_ids = set(new_node_ids) - for cache in caches.all: - cache.ensure_subcache_for(unique_id, new_node_ids).clean_unused() - for node_id in new_output_ids: - execution_list.add_node(node_id) - for link in new_output_links: - execution_list.add_strong_link(link[0], link[1], unique_id) - pending_subgraph_results[unique_id] = cached_outputs - return (ExecutionResult.PENDING, None, None) - caches.outputs.set(unique_id, output_data) - except interruption.InterruptProcessingException as iex: - logging.info("Processing interrupted") - - # skip formatting inputs/outputs - error_details: RecursiveExecutionErrorDetailsInterrupted = { - "node_id": real_node_id, - } - - return RecursiveExecutionTuple(ExecutionResult.FAILURE, error_details, iex) - except Exception as ex: - typ, _, tb = sys.exc_info() - exception_type = full_type_name(typ) - input_data_formatted = {} - if input_data_all is not None: - input_data_formatted = {} - for name, inputs in input_data_all.items(): - input_data_formatted[name] = [format_value(x) for x in inputs] - - logging.error("An error occurred while executing a workflow", exc_info=ex) - logging.error(traceback.format_exc()) - - error_details: RecursiveExecutionErrorDetails = { - "node_id": real_node_id, - "exception_message": str(ex), - "exception_type": exception_type, - "traceback": traceback.format_tb(tb), - "current_inputs": input_data_formatted - } - - if isinstance(ex, model_management.OOM_EXCEPTION): - logging.error("Got an OOM, unloading all loaded models.") - model_management.unload_all_models() - - return RecursiveExecutionTuple(ExecutionResult.FAILURE, error_details, ex) - - executed.add(unique_id) - - return ExecutionResult.SUCCESS, None, None - - -class PromptExecutor: - def __init__(self, server: ExecutorToClientProgress, lru_size=None): - self.success = None - self.lru_size = lru_size - self.server = server - self.raise_exceptions = False - self.reset() - self.history_result: HistoryResultDict | None = None - - def reset(self): - self.success = True - self.caches = CacheSet(self.lru_size) - self.status_messages = [] - - def add_message(self, event, data: dict, broadcast: bool): - data = { - **data, - # todo: use a real time library - "timestamp": int(time.time() * 1000), - } - self.status_messages.append((event, data)) - if self.server.client_id is not None or broadcast: - self.server.send_sync(event, data, self.server.client_id) - - def handle_execution_error(self, prompt_id, prompt, current_outputs, executed, error, ex): - current_span = get_current_span() - current_span.set_status(Status(StatusCode.ERROR)) - current_span.record_exception(ex) - - node_id = error["node_id"] - class_type = prompt[node_id]["class_type"] - - # First, send back the status to the frontend depending 
- # on the exception type - if isinstance(ex, interruption.InterruptProcessingException): - mes = { - "prompt_id": prompt_id, - "node_id": node_id, - "node_type": class_type, - "executed": list(executed), - } - self.add_message("execution_interrupted", mes, broadcast=True) - else: - mes = { - "prompt_id": prompt_id, - "node_id": node_id, - "node_type": class_type, - "executed": list(executed), - "exception_message": error["exception_message"], - "exception_type": error["exception_type"], - "traceback": error["traceback"], - "current_inputs": error["current_inputs"], - "current_outputs": list(current_outputs), - } - self.add_message("execution_error", mes, broadcast=False) - - if ex is not None and self.raise_exceptions: - raise ex - - def execute(self, prompt, prompt_id, extra_data=None, execute_outputs: List[str] = None): - with new_execution_context(ExecutionContext(self.server)): - self._execute_inner(prompt, prompt_id, extra_data, execute_outputs) - - def _execute_inner(self, prompt, prompt_id, extra_data=None, execute_outputs: List[str] = None): - if execute_outputs is None: - execute_outputs = [] - if extra_data is None: - extra_data = {} - interruption.interrupt_current_processing(False) - - if "client_id" in extra_data: - self.server.client_id = extra_data["client_id"] - else: - self.server.client_id = None - - self.status_messages = [] - self.add_message("execution_start", {"prompt_id": prompt_id}, broadcast=False) - - with torch.inference_mode(): - dynamic_prompt = DynamicPrompt(prompt) - is_changed_cache = IsChangedCache(dynamic_prompt, self.caches.outputs) - for cache in self.caches.all: - cache.set_prompt(dynamic_prompt, prompt.keys(), is_changed_cache) - cache.clean_unused() - - cached_nodes = [] - for node_id in prompt: - if self.caches.outputs.get(node_id) is not None: - cached_nodes.append(node_id) - - model_management.cleanup_models(keep_clone_weights_loaded=True) - self.add_message("execution_cached", - { "nodes": cached_nodes, "prompt_id": prompt_id}, - broadcast=False) - pending_subgraph_results = {} - executed = set() - execution_list = ExecutionList(dynamic_prompt, self.caches.outputs) - current_outputs = self.caches.outputs.all_node_ids() - for node_id in list(execute_outputs): - execution_list.add_node(node_id) - - while not execution_list.is_empty(): - node_id, error, ex = execution_list.stage_node_execution() - if error is not None: - self.handle_execution_error(prompt_id, dynamic_prompt.original_prompt, current_outputs, executed, error, ex) - break - - result, error, ex = execute(self.server, dynamic_prompt, self.caches, node_id, extra_data, executed, prompt_id, execution_list, pending_subgraph_results) - if result == ExecutionResult.FAILURE: - self.handle_execution_error(prompt_id, dynamic_prompt.original_prompt, current_outputs, executed, error, ex) - break - elif result == ExecutionResult.PENDING: - execution_list.unstage_node_execution() - else: # result == ExecutionResult.SUCCESS: - execution_list.complete_node_execution() - else: - # Only execute when the while-loop ends without break - self.add_message("execution_success", { "prompt_id": prompt_id }, broadcast=False) - - ui_outputs = {} - meta_outputs = {} - all_node_ids = self.caches.ui.all_node_ids() - for node_id in all_node_ids: - ui_info = self.caches.ui.get(node_id) - if ui_info is not None: - ui_outputs[node_id] = ui_info["output"] - meta_outputs[node_id] = ui_info["meta"] - self.history_result = { - "outputs": ui_outputs, - "meta": meta_outputs, - } - self.server.last_node_id = None - if 
model_management.DISABLE_SMART_MEMORY: - model_management.unload_all_models() - - @property - def outputs_ui(self) -> dict | None: - return self.history_result["outputs"] if self.history_result is not None else None - - -def validate_inputs(prompt, item, validated: typing.Dict[str, ValidateInputsTuple]) -> ValidateInputsTuple: - # todo: this should check if LoadImage / LoadImageMask paths exist - # todo: or, nodes should provide a way to validate their values - unique_id = item - if unique_id in validated: - return validated[unique_id] - - inputs = prompt[unique_id]['inputs'] - class_type = prompt[unique_id]['class_type'] - obj_class = nodes.NODE_CLASS_MAPPINGS[class_type] - - class_inputs = obj_class.INPUT_TYPES() - valid_inputs = set(class_inputs.get('required', {})).union(set(class_inputs.get('optional', {}))) - - error: ValidationErrorDict - errors = [] - valid = True - - # todo: investigate if these are at the right indent level - info: Optional[InputTypeSpec] = None - val = None - - validate_function_inputs = [] - validate_has_kwargs = False - if hasattr(obj_class, "VALIDATE_INPUTS"): - argspec = inspect.getfullargspec(obj_class.VALIDATE_INPUTS) - validate_function_inputs = argspec.args - validate_has_kwargs = argspec.varkw is not None - received_types = {} - - for x in valid_inputs: - type_input, input_category, extra_info = get_input_info(obj_class, x) - assert extra_info is not None - if x not in inputs: - if input_category == "required": - error = { - "type": "required_input_missing", - "message": "Required input is missing", - "details": f"{x}", - "extra_info": { - "input_name": x - } - } - errors.append(error) - continue - - val = inputs[x] - info: InputTypeSpec = (type_input, extra_info) - if isinstance(val, list): - if len(val) != 2: - error = { - "type": "bad_linked_input", - "message": "Bad linked input, must be a length-2 list of [node_id, slot_index]", - "details": f"{x}", - "extra_info": { - "input_name": x, - "input_config": info, - "received_value": val - } - } - errors.append(error) - continue - - o_id = val[0] - o_class_type = prompt[o_id]['class_type'] - r = nodes.NODE_CLASS_MAPPINGS[o_class_type].RETURN_TYPES - received_type = r[val[1]] - received_types[x] = received_type - any_enum = received_type == [] and (isinstance(type_input, list) or isinstance(type_input, tuple)) - if 'input_types' not in validate_function_inputs and received_type != type_input and not any_enum: - details = f"{x}, {received_type} != {type_input}" - error = { - "type": "return_type_mismatch", - "message": "Return type mismatch between linked nodes", - "details": details, - "extra_info": { - "input_name": x, - "input_config": info, - "received_type": received_type, - "linked_node": val - } - } - errors.append(error) - continue - try: - r2 = validate_inputs(prompt, o_id, validated) - if r2[0] is False: - # `r` will be set in `validated[o_id]` already - valid = False - continue - except Exception as ex: - typ, _, tb = sys.exc_info() - valid = False - exception_type = full_type_name(typ) - reasons = [{ - "type": "exception_during_inner_validation", - "message": "Exception when validating inner node", - "details": str(ex), - "extra_info": { - "input_name": x, - "input_config": info, - "exception_message": str(ex), - "exception_type": exception_type, - "traceback": traceback.format_tb(tb), - "linked_node": val - } - }] - validated[o_id] = ValidateInputsTuple(False, reasons, o_id) - continue - else: - try: - if type_input == "INT": - val = int(val) - inputs[x] = val - if type_input == "FLOAT": - 
val = float(val) - inputs[x] = val - if type_input == "STRING": - val = str(val) - inputs[x] = val - if type_input == "BOOLEAN": - val = bool(val) - inputs[x] = val - except Exception as ex: - error = { - "type": "invalid_input_type", - "message": f"Failed to convert an input value to a {type_input} value", - "details": f"{x}, {val}, {ex}", - "extra_info": { - "input_name": x, - "input_config": info, - "received_value": val, - "exception_message": str(ex) - } - } - errors.append(error) - continue - - if x not in validate_function_inputs: - has_min_max: IntSpecOptions | FloatSpecOptions = info[1] - if "min" in has_min_max and val < has_min_max["min"]: - error = { - "type": "value_smaller_than_min", - "message": "Value {} smaller than min of {}".format(val, has_min_max["min"]), - "details": f"{x}", - "extra_info": { - "input_name": x, - "input_config": info, - "received_value": val, - } - } - errors.append(error) - continue - if "max" in has_min_max and val > has_min_max["max"]: - error = { - "type": "value_bigger_than_max", - "message": "Value {} bigger than max of {}".format(val, has_min_max["max"]), - "details": f"{x}", - "extra_info": { - "input_name": x, - "input_config": info, - "received_value": val, - } - } - errors.append(error) - continue - - if isinstance(type_input, list): - if "\\" in val: - # try to normalize paths for comparison purposes - val = canonicalize_path(val) - if all(isinstance(item, (str, PathLike)) for item in type_input): - type_input = [canonicalize_path(item) for item in type_input] - if val not in type_input: - input_config = info - list_info = "" - - # Don't send back gigantic lists like if they're lots of - # scanned model filepaths - if len(type_input) > 20: - list_info = f"(list of length {len(type_input)})" - input_config = None - else: - list_info = str(type_input) - - error = { - "type": "value_not_in_list", - "message": "Value not in list", - "details": f"{x}: '{val}' not in {list_info}", - "extra_info": { - "input_name": x, - "input_config": input_config, - "received_value": val, - } - } - errors.append(error) - continue - - if len(validate_function_inputs) > 0 or validate_has_kwargs: - input_data_all, _ = get_input_data(inputs, obj_class, unique_id) - input_filtered = {} - for x in input_data_all: - if x in validate_function_inputs or validate_has_kwargs: - input_filtered[x] = input_data_all[x] - if 'input_types' in validate_function_inputs: - input_filtered['input_types'] = [received_types] - - # ret = obj_class.VALIDATE_INPUTS(**input_filtered) - ret = map_node_over_list(obj_class, input_filtered, "VALIDATE_INPUTS") - for x in input_filtered: - for i, r in enumerate(ret): - if r is not True and not isinstance(r, ExecutionBlocker): - details = f"{x}" - if r is not False: - details += f" - {str(r)}" - - error = { - "type": "custom_validation_failed", - "message": "Custom validation failed for node", - "details": details, - "extra_info": { - "input_name": x, - } - } - errors.append(error) - continue - - if len(errors) > 0 or valid is not True: - ret = ValidateInputsTuple(False, errors, unique_id) - else: - ret = ValidateInputsTuple(True, [], unique_id) - - validated[unique_id] = ret - return ret - - -def full_type_name(klass): - module = klass.__module__ - if module == 'builtins': - return klass.__qualname__ - return module + '.' 
+ klass.__qualname__ - - -@tracer.start_as_current_span("Validate Prompt") -def validate_prompt(prompt: typing.Mapping[str, typing.Any]) -> ValidationTuple: - res = _validate_prompt(prompt) - if not res.valid: - span = get_current_span() - span.set_status(Status(StatusCode.ERROR)) - if res.error is not None and len(res.error) > 0: - span.set_attributes({ - f"error.{k}": v for k, v in res.error.items() if isinstance(v, (bool, str, bytes, int, float, list)) - }) - if "extra_info" in res.error and isinstance(res.error["extra_info"], dict): - extra_info: ValidationErrorExtraInfoDict = res.error["extra_info"] - span.set_attributes({ - f"error.extra_info.{k}": v for k, v in extra_info.items() if isinstance(v, (str, list)) - }) - if len(res.node_errors) > 0: - for node_id, node_error in res.node_errors.items(): - for node_error_field, node_error_value in node_error.items(): - if isinstance(node_error_value, (str, bool, int, float)): - span.set_attribute(f"node_errors.{node_id}.{node_error_field}", node_error_value) - return res - - -def _validate_prompt(prompt: typing.Mapping[str, typing.Any]) -> ValidationTuple: - outputs = set() - for x in prompt: - if 'class_type' not in prompt[x]: - error = { - "type": "invalid_prompt", - "message": f"Cannot execute because a node is missing the class_type property.", - "details": f"Node ID '#{x}'", - "extra_info": {} - } - return ValidationTuple(False, error, [], []) - - class_type = prompt[x]['class_type'] - class_ = nodes.NODE_CLASS_MAPPINGS.get(class_type, None) - if class_ is None: - error = { - "type": "invalid_prompt", - "message": f"Cannot execute because node {class_type} does not exist.", - "details": f"Node ID '#{x}'", - "extra_info": {} - } - return ValidationTuple(False, error, [], []) - - if hasattr(class_, 'OUTPUT_NODE') and class_.OUTPUT_NODE is True: - outputs.add(x) - - if len(outputs) == 0: - error = { - "type": "prompt_no_outputs", - "message": "Prompt has no outputs", - "details": "", - "extra_info": {} - } - return ValidationTuple(False, error, [], []) - - good_outputs = set() - errors = [] - node_errors: typing.Dict[str, NodeErrorsDictValue] = {} - validated: typing.Dict[str, ValidateInputsTuple] = {} - for o in outputs: - valid = False - reasons: List[ValidationErrorDict] = [] - try: - m = validate_inputs(prompt, o, validated) - valid = m[0] - reasons = m[1] - except Exception as ex: - typ, _, tb = sys.exc_info() - valid = False - exception_type = full_type_name(typ) - reasons = [{ - "type": "exception_during_validation", - "message": "Exception when validating node", - "details": str(ex), - "extra_info": { - "exception_type": exception_type, - "traceback": traceback.format_tb(tb) - } - }] - validated[o] = ValidateInputsTuple(False, reasons, o) - - if valid is True: - good_outputs.add(o) - else: - logging.error(f"Failed to validate prompt for output {o}:") - if len(reasons) > 0: - logging.error("* (prompt):") - for reason in reasons: - logging.error(f" - {reason['message']}: {reason['details']}") - errors += [(o, reasons)] - for node_id, result in validated.items(): - valid = result[0] - reasons = result[1] - # If a node upstream has errors, the nodes downstream will also - # be reported as invalid, but there will be no errors attached. - # So don't return those nodes as having errors in the response. 
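-        # e.g. if node "4" fails validation, the nodes downstream of "4" are also
-        # marked invalid but carry an empty reasons list, so only "4" is reported
-        # in node_errors.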
- if valid is not True and len(reasons) > 0: - if node_id not in node_errors: - class_type = prompt[node_id]['class_type'] - node_errors[node_id] = { - "errors": reasons, - "dependent_outputs": [], - "class_type": class_type - } - logging.error(f"* {class_type} {node_id}:") - for reason in reasons: - logging.error(f" - {reason['message']}: {reason['details']}") - node_errors[node_id]["dependent_outputs"].append(o) - logging.error("Output will be ignored") - - if len(good_outputs) == 0: - errors_list = [] - for o, _errors in errors: - for error in _errors: - errors_list.append(f"{error['message']}: {error['details']}") - errors_list = "\n".join(errors_list) - - error = { - "type": "prompt_outputs_failed_validation", - "message": "Prompt outputs failed validation", - "details": errors_list, - "extra_info": {} - } - - return ValidationTuple(False, error, list(good_outputs), node_errors) - - return ValidationTuple(True, None, list(good_outputs), node_errors) - - -class PromptQueue(AbstractPromptQueue): - def __init__(self, server: ExecutorToClientProgress): - self.server = server - self.mutex = threading.RLock() - self.not_empty = threading.Condition(self.mutex) - self.queue: typing.List[QueueItem] = [] - self.currently_running: typing.Dict[str, QueueItem] = {} - # history maps the second integer prompt id in the queue tuple to a dictionary with keys "prompt" and "outputs - # todo: use the new History class for the sake of simplicity - self.history: typing.Dict[str, HistoryEntry] = {} - self.flags = {} - - def size(self) -> int: - return len(self.queue) - - def put(self, item: QueueItem): - with self.mutex: - heapq.heappush(self.queue, item) - self.server.queue_updated() - self.not_empty.notify() - - def get(self, timeout=None) -> typing.Optional[typing.Tuple[QueueTuple, str]]: - with self.not_empty: - while len(self.queue) == 0: - self.not_empty.wait(timeout=timeout) - if timeout is not None and len(self.queue) == 0: - return None - item_with_future: QueueItem = heapq.heappop(self.queue) - assert item_with_future.prompt_id is not None - assert item_with_future.prompt_id != "" - assert item_with_future.prompt_id not in self.currently_running - assert isinstance(item_with_future.prompt_id, str) - task_id = item_with_future.prompt_id - self.currently_running[task_id] = item_with_future - self.server.queue_updated() - return copy.deepcopy(item_with_future.queue_tuple), task_id - - def task_done(self, item_id: str, outputs: dict, - status: Optional[ExecutionStatus]): - history_result = outputs - with self.mutex: - queue_item = self.currently_running.pop(item_id) - prompt = queue_item.queue_tuple - if len(self.history) > MAXIMUM_HISTORY_SIZE: - self.history.pop(next(iter(self.history))) - - status_dict: Optional[dict] = None - if status is not None: - status_dict = copy.deepcopy(ExecutionStatus(*status)._asdict()) - - outputs_ = history_result["outputs"] - self.history[prompt[1]] = { - "prompt": prompt, - "outputs": copy.deepcopy(outputs_), - 'status': status_dict, - } - self.history[prompt[1]].update(history_result) - self.server.queue_updated() - if queue_item.completed: - queue_item.completed.set_result(outputs_) - - def get_current_queue(self) -> Tuple[typing.List[QueueTuple], typing.List[QueueTuple]]: - with self.mutex: - out: typing.List[QueueTuple] = [] - for x in self.currently_running.values(): - out += [x.queue_tuple] - return out, copy.deepcopy([item.queue_tuple for item in self.queue]) - - def get_tasks_remaining(self): - with self.mutex: - return len(self.queue) + 
len(self.currently_running) - - def wipe_queue(self): - with self.mutex: - for item in self.queue: - if item.completed: - item.completed.set_exception(Exception("queue cancelled")) - self.queue = [] - self.server.queue_updated() - - def delete_queue_item(self, function): - with self.mutex: - for x in range(len(self.queue)): - if function(self.queue[x].queue_tuple): - if len(self.queue) == 1: - self.wipe_queue() - else: - item = self.queue.pop(x) - if item.completed: - item.completed.set_exception(Exception("queue item deleted")) - heapq.heapify(self.queue) - self.server.queue_updated() - return True - return False - - def get_history(self, prompt_id=None, max_items=None, offset=-1): - with self.mutex: - if prompt_id is None: - out = {} - i = 0 - if offset < 0 and max_items is not None: - offset = len(self.history) - max_items - for k in self.history: - if i >= offset: - out[k] = self.history[k] - if max_items is not None and len(out) >= max_items: - break - i += 1 - return out - elif prompt_id in self.history: - return {prompt_id: copy.deepcopy(self.history[prompt_id])} - else: - return {} - - def wipe_history(self): - with self.mutex: - self.history.clear() - - def delete_history_item(self, id_to_delete: str): - with self.mutex: - self.history.pop(id_to_delete, None) - - def set_flag(self, name, data): - with self.mutex: - self.flags[name] = data - self.not_empty.notify() - - def get_flags(self, reset=True): - with self.mutex: - if reset: - ret = self.flags - self.flags = {} - return ret - else: - return self.flags.copy() diff --git a/comfy/graph.py b/comfy/graph.py deleted file mode 100644 index 74babe200..000000000 --- a/comfy/graph.py +++ /dev/null @@ -1,234 +0,0 @@ -from .cmd.execution import nodes -from .component_model.executor_types import DependencyCycleError, NodeInputError, NodeNotFoundError -from .graph_utils import is_link - - -class DynamicPrompt: - def __init__(self, original_prompt): - # The original prompt provided by the user - self.original_prompt = original_prompt - # Any extra pieces of the graph created during execution - self.ephemeral_prompt = {} - self.ephemeral_parents = {} - self.ephemeral_display = {} - - def get_node(self, node_id): - if node_id in self.ephemeral_prompt: - return self.ephemeral_prompt[node_id] - if node_id in self.original_prompt: - return self.original_prompt[node_id] - raise NodeNotFoundError(f"Node {node_id} not found") - - def has_node(self, node_id): - return node_id in self.original_prompt or node_id in self.ephemeral_prompt - - def add_ephemeral_node(self, node_id, node_info, parent_id, display_id): - self.ephemeral_prompt[node_id] = node_info - self.ephemeral_parents[node_id] = parent_id - self.ephemeral_display[node_id] = display_id - - def get_real_node_id(self, node_id): - while node_id in self.ephemeral_parents: - node_id = self.ephemeral_parents[node_id] - return node_id - - def get_parent_node_id(self, node_id): - return self.ephemeral_parents.get(node_id, None) - - def get_display_node_id(self, node_id): - while node_id in self.ephemeral_display: - node_id = self.ephemeral_display[node_id] - return node_id - - def all_node_ids(self): - return set(self.original_prompt.keys()).union(set(self.ephemeral_prompt.keys())) - - def get_original_prompt(self): - return self.original_prompt - - -def get_input_info(class_def, input_name): - valid_inputs = class_def.INPUT_TYPES() - input_info = None - input_category = None - if "required" in valid_inputs and input_name in valid_inputs["required"]: - input_category = "required" - input_info = 
valid_inputs["required"][input_name] - elif "optional" in valid_inputs and input_name in valid_inputs["optional"]: - input_category = "optional" - input_info = valid_inputs["optional"][input_name] - elif "hidden" in valid_inputs and input_name in valid_inputs["hidden"]: - input_category = "hidden" - input_info = valid_inputs["hidden"][input_name] - if input_info is None: - return None, None, None - input_type = input_info[0] - if len(input_info) > 1: - extra_info = input_info[1] - else: - extra_info = {} - return input_type, input_category, extra_info - - -class TopologicalSort: - def __init__(self, dynprompt): - self.dynprompt = dynprompt - self.pendingNodes = {} - self.blockCount = {} # Number of nodes this node is directly blocked by - self.blocking = {} # Which nodes are blocked by this node - - def get_input_info(self, unique_id, input_name): - class_type = self.dynprompt.get_node(unique_id)["class_type"] - class_def = nodes.NODE_CLASS_MAPPINGS[class_type] - return get_input_info(class_def, input_name) - - def make_input_strong_link(self, to_node_id, to_input): - inputs = self.dynprompt.get_node(to_node_id)["inputs"] - if to_input not in inputs: - raise NodeInputError(f"Node {to_node_id} says it needs input {to_input}, but there is no input to that node at all") - value = inputs[to_input] - if not is_link(value): - raise NodeInputError(f"Node {to_node_id} says it needs input {to_input}, but that value is a constant") - from_node_id, from_socket = value - self.add_strong_link(from_node_id, from_socket, to_node_id) - - def add_strong_link(self, from_node_id, from_socket, to_node_id): - self.add_node(from_node_id) - if to_node_id not in self.blocking[from_node_id]: - self.blocking[from_node_id][to_node_id] = {} - self.blockCount[to_node_id] += 1 - self.blocking[from_node_id][to_node_id][from_socket] = True - - def add_node(self, unique_id, include_lazy=False, subgraph_nodes=None): - if unique_id in self.pendingNodes: - return - self.pendingNodes[unique_id] = True - self.blockCount[unique_id] = 0 - self.blocking[unique_id] = {} - - inputs = self.dynprompt.get_node(unique_id)["inputs"] - for input_name in inputs: - value = inputs[input_name] - if is_link(value): - from_node_id, from_socket = value - if subgraph_nodes is not None and from_node_id not in subgraph_nodes: - continue - input_type, input_category, input_info = self.get_input_info(unique_id, input_name) - is_lazy = input_info is not None and "lazy" in input_info and input_info["lazy"] - if include_lazy or not is_lazy: - self.add_strong_link(from_node_id, from_socket, unique_id) - - def get_ready_nodes(self): - return [node_id for node_id in self.pendingNodes if self.blockCount[node_id] == 0] - - def pop_node(self, unique_id): - del self.pendingNodes[unique_id] - for blocked_node_id in self.blocking[unique_id]: - self.blockCount[blocked_node_id] -= 1 - del self.blocking[unique_id] - - def is_empty(self): - return len(self.pendingNodes) == 0 - - -class ExecutionList(TopologicalSort): - """ - ExecutionList implements a topological dissolve of the graph. After a node is staged for execution, - it can still be returned to the graph after having further dependencies added. 
- """ - - def __init__(self, dynprompt, output_cache): - super().__init__(dynprompt) - self.output_cache = output_cache - self.staged_node_id = None - - def add_strong_link(self, from_node_id, from_socket, to_node_id): - if self.output_cache.get(from_node_id) is not None: - # Nothing to do - return - super().add_strong_link(from_node_id, from_socket, to_node_id) - - def stage_node_execution(self): - assert self.staged_node_id is None - if self.is_empty(): - return None, None, None - available = self.get_ready_nodes() - if len(available) == 0: - cycled_nodes = self.get_nodes_in_cycle() - # Because cycles composed entirely of static nodes are caught during initial validation, - # we will 'blame' the first node in the cycle that is not a static node. - blamed_node = cycled_nodes[0] - for node_id in cycled_nodes: - display_node_id = self.dynprompt.get_display_node_id(node_id) - if display_node_id != node_id: - blamed_node = display_node_id - break - ex = DependencyCycleError("Dependency cycle detected") - error_details = { - "node_id": blamed_node, - "exception_message": str(ex), - "exception_type": "graph.DependencyCycleError", - "traceback": [], - "current_inputs": [] - } - return None, error_details, ex - next_node = available[0] - # If an output node is available, do that first. - # Technically this has no effect on the overall length of execution, but it feels better as a user - # for a PreviewImage to display a result as soon as it can - # Some other heuristics could probably be used here to improve the UX further. - for node_id in available: - class_type = self.dynprompt.get_node(node_id)["class_type"] - class_def = nodes.NODE_CLASS_MAPPINGS[class_type] - if hasattr(class_def, 'OUTPUT_NODE') and class_def.OUTPUT_NODE == True: - next_node = node_id - break - self.staged_node_id = next_node - return self.staged_node_id, None, None - - def unstage_node_execution(self): - assert self.staged_node_id is not None - self.staged_node_id = None - - def complete_node_execution(self): - node_id = self.staged_node_id - self.pop_node(node_id) - self.staged_node_id = None - - def get_nodes_in_cycle(self): - # We'll dissolve the graph in reverse topological order to leave only the nodes in the cycle. - # We're skipping some of the performance optimizations from the original TopologicalSort to keep - # the code simple (and because having a cycle in the first place is a catastrophic error) - blocked_by = {node_id: {} for node_id in self.pendingNodes} - for from_node_id in self.blocking: - for to_node_id in self.blocking[from_node_id]: - if True in self.blocking[from_node_id][to_node_id].values(): - blocked_by[to_node_id][from_node_id] = True - to_remove = [node_id for node_id in blocked_by if len(blocked_by[node_id]) == 0] - while len(to_remove) > 0: - for node_id in to_remove: - for to_node_id in blocked_by: - if node_id in blocked_by[to_node_id]: - del blocked_by[to_node_id][node_id] - del blocked_by[node_id] - to_remove = [node_id for node_id in blocked_by if len(blocked_by[node_id]) == 0] - return list(blocked_by.keys()) - - -class ExecutionBlocker: - """ - Return this from a node and any users will be blocked with the given error message. - If the message is None, execution will be blocked silently instead. - Generally, you should avoid using this functionality unless absolutely necessary. Whenever it's - possible, a lazy input will be more efficient and have a better user experience. - This functionality is useful in two cases: - 1. You want to conditionally prevent an output node from executing. 
(Particularly a built-in node - like SaveImage. For your own output nodes, I would recommend just adding a BOOL input and using - lazy evaluation to let it conditionally disable itself.) - 2. You have a node with multiple possible outputs, some of which are invalid and should not be used. - (I would recommend not making nodes like this in the future -- instead, make multiple nodes with - different outputs. Unfortunately, there are several popular existing nodes using this pattern.) - """ - - def __init__(self, message): - self.message = message diff --git a/comfy/graph_utils.py b/comfy/graph_utils.py deleted file mode 100644 index dfbdc147a..000000000 --- a/comfy/graph_utils.py +++ /dev/null @@ -1,143 +0,0 @@ -def is_link(obj): - if not isinstance(obj, list): - return False - if len(obj) != 2: - return False - if not isinstance(obj[0], str): - return False - if not isinstance(obj[1], int) and not isinstance(obj[1], float): - return False - return True - - -class GraphBuilder: - """ - The GraphBuilder is just a utility class that outputs graphs in the form expected by the ComfyUI back-end - """ - _default_prefix_root = "" - _default_prefix_call_index = 0 - _default_prefix_graph_index = 0 - - def __init__(self, prefix=None): - if prefix is None: - self.prefix = GraphBuilder.alloc_prefix() - else: - self.prefix = prefix - self.nodes = {} - self.id_gen = 1 - - @classmethod - def set_default_prefix(cls, prefix_root, call_index, graph_index=0): - cls._default_prefix_root = prefix_root - cls._default_prefix_call_index = call_index - cls._default_prefix_graph_index = graph_index - - @classmethod - def alloc_prefix(cls, root=None, call_index=None, graph_index=None): - if root is None: - root = GraphBuilder._default_prefix_root - if call_index is None: - call_index = GraphBuilder._default_prefix_call_index - if graph_index is None: - graph_index = GraphBuilder._default_prefix_graph_index - result = f"{root}.{call_index}.{graph_index}." 
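-        # Starting from a fresh counter, alloc_prefix("5", 0) yields "5.0.0." and,
-        # because the class-level graph index advances below, the next call yields
-        # "5.0.1.".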
- GraphBuilder._default_prefix_graph_index += 1 - return result - - def node(self, class_type, id=None, **kwargs): - if id is None: - id = str(self.id_gen) - self.id_gen += 1 - id = self.prefix + id - if id in self.nodes: - return self.nodes[id] - - node = Node(id, class_type, kwargs) - self.nodes[id] = node - return node - - def lookup_node(self, id): - id = self.prefix + id - return self.nodes.get(id) - - def finalize(self): - output = {} - for node_id, node in self.nodes.items(): - output[node_id] = node.serialize() - return output - - def replace_node_output(self, node_id, index, new_value): - node_id = self.prefix + node_id - to_remove = [] - for node in self.nodes.values(): - for key, value in node.inputs.items(): - if is_link(value) and value[0] == node_id and value[1] == index: - if new_value is None: - to_remove.append((node, key)) - else: - node.inputs[key] = new_value - for node, key in to_remove: - del node.inputs[key] - - def remove_node(self, id): - id = self.prefix + id - del self.nodes[id] - - -class Node: - def __init__(self, id, class_type, inputs): - self.id = id - self.class_type = class_type - self.inputs = inputs - self.override_display_id = None - - def out(self, index): - return [self.id, index] - - def set_input(self, key, value): - if value is None: - if key in self.inputs: - del self.inputs[key] - else: - self.inputs[key] = value - - def get_input(self, key): - return self.inputs.get(key) - - def set_override_display_id(self, override_display_id): - self.override_display_id = override_display_id - - def serialize(self): - serialized = { - "class_type": self.class_type, - "inputs": self.inputs - } - if self.override_display_id is not None: - serialized["override_display_id"] = self.override_display_id - return serialized - - -def add_graph_prefix(graph, outputs, prefix): - # Change the node IDs and any internal links - new_graph = {} - for node_id, node_info in graph.items(): - # Make sure the added nodes have unique IDs - new_node_id = prefix + node_id - new_node = {"class_type": node_info["class_type"], "inputs": {}} - for input_name, input_value in node_info.get("inputs", {}).items(): - if is_link(input_value): - new_node["inputs"][input_name] = [prefix + input_value[0], input_value[1]] - else: - new_node["inputs"][input_name] = input_value - new_graph[new_node_id] = new_node - - # Change the node IDs in the outputs - new_outputs = [] - for n in range(len(outputs)): - output = outputs[n] - if is_link(output): - new_outputs.append([prefix + output[0], output[1]]) - else: - new_outputs.append(output) - - return new_graph, tuple(new_outputs)
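For reference, below is a minimal sketch of how a custom node could have driven the
deleted GraphBuilder to expand into a subgraph at runtime. The "Blur" class type and
its radius input are hypothetical placeholders; the expansion contract itself (a
returned dict with "expand" and "result" keys, where any link in "result" is resolved
from the ephemeral nodes' cached outputs) is the one consumed by execute() in the
deleted comfy/cmd/execution.py.

    from comfy.graph_utils import GraphBuilder


    class BlurTwice:
        @classmethod
        def INPUT_TYPES(cls):
            return {"required": {"image": ("IMAGE",)}}

        RETURN_TYPES = ("IMAGE",)
        FUNCTION = "expand"

        def expand(self, image):
            # Ephemeral node ids are namespaced by the builder's prefix, so
            # repeated expansions never trip DuplicateNodeError.
            graph = GraphBuilder()
            first = graph.node("Blur", image=image, radius=2)  # hypothetical node class
            second = graph.node("Blur", image=first.out(0), radius=2)
            return {
                # second.out(0) is a link; it resolves to the ephemeral node's
                # output once the expanded subgraph has finished executing.
                "result": (second.out(0),),
                "expand": graph.finalize(),
            }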