mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-12-17 10:02:59 +08:00)
add detailed debug logging
This commit is contained in:
parent fa19dd4620
commit f40e00cb35
execution.py | 26 ++++++++++++++++++++++++++
server.py    |  2 +-
execution.py
@@ -400,7 +400,12 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
     inputs = dynprompt.get_node(unique_id)['inputs']
     class_type = dynprompt.get_node(unique_id)['class_type']
     class_def = nodes.NODE_CLASS_MAPPINGS[class_type]
+
+    # Log node execution start
+    logging.info(f"📍 Node [{display_node_id}] START: {class_type}")
+
     if caches.outputs.get(unique_id) is not None:
+        logging.info(f"✅ Node [{display_node_id}] CACHED: {class_type} (using cached output)")
         if server.client_id is not None:
             cached_output = caches.ui.get(unique_id) or {}
             server.send_sync("executed", { "node": unique_id, "display_node": display_node_id, "output": cached_output.get("output",None), "prompt_id": prompt_id }, server.client_id)
@@ -446,15 +451,20 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
             has_subgraph = False
         else:
             get_progress_state().start_progress(unique_id)
+            logging.info(f"🔍 Node [{display_node_id}] Getting input data for {class_type}")
             input_data_all, missing_keys, hidden_inputs = get_input_data(inputs, class_def, unique_id, caches.outputs, dynprompt, extra_data)
+            logging.info(f"📥 Node [{display_node_id}] Input data ready, keys: {list(input_data_all.keys())}")
             if server.client_id is not None:
                 server.last_node_id = display_node_id
                 server.send_sync("executing", { "node": unique_id, "display_node": display_node_id, "prompt_id": prompt_id }, server.client_id)
 
             obj = caches.objects.get(unique_id)
             if obj is None:
+                logging.info(f"🏗️ Node [{display_node_id}] Creating new instance of {class_type}")
                 obj = class_def()
                 caches.objects.set(unique_id, obj)
+            else:
+                logging.info(f"♻️ Node [{display_node_id}] Reusing cached instance of {class_type}")
 
             if issubclass(class_def, _ComfyNodeInternal):
                 lazy_status_present = first_real_override(class_def, "check_lazy_status") is not None
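Aside (not part of the commit): `caches.objects` in the hunk above acts as a per-node instance cache, which is what the new 🏗️/♻️ messages distinguish. A minimal sketch of that pattern, with `object_cache` and `get_node_object` as hypothetical stand-ins:

    # Hypothetical, simplified stand-in for ComfyUI's caches.objects:
    # construct each node object once, then reuse it on later executions.
    object_cache = {}

    def get_node_object(unique_id, class_def):
        obj = object_cache.get(unique_id)
        if obj is None:
            obj = class_def()              # first execution: build the instance
            object_cache[unique_id] = obj
        # on later executions the cached instance is returned unchanged
        return obj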
@@ -493,7 +503,9 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
             def pre_execute_cb(call_index):
                 # TODO - How to handle this with async functions without contextvars (which requires Python 3.12)?
                 GraphBuilder.set_default_prefix(unique_id, call_index, 0)
+            logging.info(f"⚙️ Node [{display_node_id}] Executing {class_type}")
             output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs)
+            logging.info(f"📤 Node [{display_node_id}] Execution completed, has_subgraph: {has_subgraph}, has_pending: {has_pending_tasks}")
             if has_pending_tasks:
                 pending_async_nodes[unique_id] = output_data
                 unblock = execution_list.add_external_block(unique_id)
@@ -572,6 +584,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
             for name, inputs in input_data_all.items():
                 input_data_formatted[name] = [format_value(x) for x in inputs]
 
+        logging.error(f"❌ Node [{display_node_id}] FAILED: {class_type}")
         logging.error(f"!!! Exception during processing !!! {ex}")
         logging.error(traceback.format_exc())
         tips = ""
@@ -593,6 +606,8 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
 
     get_progress_state().finish_progress(unique_id)
     executed.add(unique_id)
 
+    logging.info(f"✅ Node [{display_node_id}] SUCCESS: {class_type} completed")
+
     return (ExecutionResult.SUCCESS, None, None)
 
@@ -649,6 +664,7 @@ class PromptExecutor:
         asyncio.run(self.execute_async(prompt, prompt_id, extra_data, execute_outputs))
 
     async def execute_async(self, prompt, prompt_id, extra_data={}, execute_outputs=[]):
+        logging.info(f"🚀 Workflow execution START: prompt_id={prompt_id}, nodes_count={len(prompt)}")
         nodes.interrupt_processing(False)
 
         if "client_id" in extra_data:
@@ -672,6 +688,9 @@ class PromptExecutor:
             for node_id in prompt:
                 if self.caches.outputs.get(node_id) is not None:
                     cached_nodes.append(node_id)
 
+            if len(cached_nodes) > 0:
+                logging.info(f"💾 Workflow has {len(cached_nodes)} cached nodes: {cached_nodes}")
+
             comfy.model_management.cleanup_models_gc()
             self.add_message("execution_cached",
@@ -684,6 +703,8 @@ class PromptExecutor:
             current_outputs = self.caches.outputs.all_node_ids()
             for node_id in list(execute_outputs):
                 execution_list.add_node(node_id)
 
+            logging.info(f"📋 Workflow execution list prepared, executing {len(execute_outputs)} output nodes")
+
             while not execution_list.is_empty():
                 node_id, error, ex = await execution_list.stage_node_execution()
@@ -695,6 +716,7 @@ class PromptExecutor:
                 result, error, ex = await execute(self.server, dynamic_prompt, self.caches, node_id, extra_data, executed, prompt_id, execution_list, pending_subgraph_results, pending_async_nodes)
                 self.success = result != ExecutionResult.FAILURE
                 if result == ExecutionResult.FAILURE:
+                    logging.error(f"💥 Workflow execution FAILED at node {node_id}")
                     self.handle_execution_error(prompt_id, dynamic_prompt.original_prompt, current_outputs, executed, error, ex)
                     break
                 elif result == ExecutionResult.PENDING:
@@ -703,6 +725,7 @@ class PromptExecutor:
                 execution_list.complete_node_execution()
             else:
                 # Only execute when the while-loop ends without break
+                logging.info(f"🎉 Workflow execution SUCCESS: prompt_id={prompt_id}, executed_nodes={len(executed)}")
                 self.add_message("execution_success", { "prompt_id": prompt_id }, broadcast=False)
 
             ui_outputs = {}
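Aside (not part of the commit): the `# Only execute when the while-loop ends without break` context line above relies on Python's `while ... else` construct, whose `else` block runs only when the loop exits without hitting `break`. A minimal illustration:

    items = [1, 2, 3]
    i = 0
    while i < len(items):
        if items[i] < 0:
            break                        # a FAILURE result breaks out, skipping else
        i += 1
    else:
        print("all items processed")     # runs only when no break occurred

This is why the new 🎉 SUCCESS log and the `execution_success` message fire only when no node failed.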
@@ -719,7 +742,10 @@ class PromptExecutor:
             }
             self.server.last_node_id = None
             if comfy.model_management.DISABLE_SMART_MEMORY:
+                logging.info("🧹 Unloading all models (DISABLE_SMART_MEMORY is enabled)")
                 comfy.model_management.unload_all_models()
 
+        logging.info(f"✨ Workflow execution COMPLETED: prompt_id={prompt_id}")
+
 
 async def validate_inputs(prompt_id, prompt, item, validated):
server.py
@@ -673,7 +673,7 @@ class PromptServer():
 
         @routes.post("/prompt")
         async def post_prompt(request):
-            logging.info("got prompt")
+            logging.info("got prompt in debug comfyui")
             json_data = await request.json()
             json_data = self.trigger_on_prompt(json_data)
 
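All of the added messages go through Python's standard `logging` module at INFO or ERROR level, so they show up wherever ComfyUI's logger emits INFO. A minimal standalone sketch of how such lines render, assuming a plain `basicConfig` setup (the node id and class name below are hypothetical):

    import logging

    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s [%(levelname)s] %(message)s")

    display_node_id = "12"    # hypothetical node id
    class_type = "KSampler"   # hypothetical node class name
    logging.info(f"📍 Node [{display_node_id}] START: {class_type}")
    logging.info(f"✅ Node [{display_node_id}] SUCCESS: {class_type} completed")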