Compare commits


11 Commits

Author SHA1 Message Date
Godwin Iheuwa
fed84ab18c
Merge 76eb8f26af into fc0cb10bcb 2026-01-07 14:48:57 +05:30
Godwin Iheuwa
76eb8f26af
Merge branch 'master' into fix/custom-node-import-failure-context 2026-01-07 14:48:55 +05:30
comfyanonymous
fc0cb10bcb
ComfyUI v0.8.0 2026-01-07 04:07:31 -05:00
comfyanonymous
b7d7cc1d49
Fix fp8 fast issue. (#11688) 2026-01-07 01:39:06 -05:00
Alexander Piskun
79e94544bd
feat(api-nodes): add WAN2.6 ReferenceToVideo (#11644) 2026-01-06 22:04:50 -08:00
Yoland Yan
ce0000c4f2
Force sequential execution in CI test jobs (#11687)
Added max-parallel setting to enforce sequential execution in test jobs.
2026-01-07 00:57:31 -05:00
comfyanonymous
c5cfb34c07
Update comfy-kitchen version to 0.2.3 (#11685) 2026-01-06 23:51:45 -05:00
comfyanonymous
edee33f55e
Disable comfy kitchen cuda if pytorch cuda less than 13 (#11681)
2026-01-06 22:13:43 -05:00
comfyanonymous
2c03884f5f
Skip fp4 matrix mult on devices that don't support it. (#11677) 2026-01-06 18:07:26 -05:00
RUiNtheExtinct
d23a9633d9
Merge origin/master into fix/custom-node-import-failure-context 2025-12-29 23:08:34 +05:30
RUiNtheExtinct
1bb97c480d
fix: Show custom node import failure reasons in summary
When custom nodes fail to import, the summary now shows the exception
type and message instead of just "(IMPORT FAILED)".

Before:
  0.0 seconds (IMPORT FAILED): custom_nodes/my_node

After:
  0.0 seconds (IMPORT FAILED: ImportError: No module named 'xyz'): custom_nodes/my_node

Changes:
- Add IMPORT_FAILED_REASONS dict to store failure context
- Capture exception type and first line of message (max 100 chars)
- Include failure reason in import summary output

This helps users quickly diagnose why custom nodes failed to load
without needing to scroll through the full traceback.

Fixes #11454
2025-12-28 12:16:03 +05:30
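The mechanics of this change are small enough to sketch standalone. Below is a minimal illustration of the capture logic (the real implementation lives in nodes.py and appears in the diff further down; record_import_failure is a hypothetical helper name used here for illustration, not the actual function):

def record_import_failure(module_path: str, e: Exception, reasons: dict[str, str]) -> None:
    # Keep only the first line of the message, capped at 100 chars,
    # so the import summary stays one line per module.
    first_line = str(e).split('\n')[0][:100]
    reasons[module_path] = f"{type(e).__name__}: {first_line}"

reasons: dict[str, str] = {}
record_import_failure("custom_nodes/my_node", ImportError("No module named 'xyz'"), reasons)
assert reasons["custom_nodes/my_node"] == "ImportError: No module named 'xyz'"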
12 changed files with 304 additions and 11 deletions

View File

@@ -20,6 +20,7 @@ jobs:
   test-stable:
     strategy:
       fail-fast: false
+      max-parallel: 1 # This forces sequential execution
       matrix:
         # os: [macos, linux, windows]
         # os: [macos, linux]
@@ -74,6 +75,7 @@ jobs:
   test-unix-nightly:
     strategy:
       fail-fast: false
+      max-parallel: 1 # This forces sequential execution
       matrix:
         # os: [macos, linux]
         os: [linux]

View File

@@ -1504,6 +1504,16 @@ def supports_fp8_compute(device=None):
     return True

+def supports_nvfp4_compute(device=None):
+    if not is_nvidia():
+        return False
+
+    props = torch.cuda.get_device_properties(device)
+    if props.major < 10:
+        return False
+
+    return True
+
 def extended_fp16_support():
     # TODO: check why some models work with fp16 on newer torch versions but not on older
     if torch_version_numeric < (2, 7):
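For context, the new supports_nvfp4_compute check reduces to a CUDA compute-capability gate: major version 10 or newer. A minimal standalone sketch of the same test (assuming an NVIDIA CUDA build of PyTorch; the real helper also consults is_nvidia() first):

import torch

def has_nvfp4_capable_gpu(device: int | None = None) -> bool:
    # fp4 matrix mult needs compute capability >= 10.x.
    if not torch.cuda.is_available():
        return False
    props = torch.cuda.get_device_properties(torch.cuda.current_device() if device is None else device)
    return props.major >= 10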

View File

@@ -427,12 +427,12 @@ def fp8_linear(self, input):
         input = torch.clamp(input, min=-448, max=448, out=input)
         input_fp8 = input.to(dtype).contiguous()

         layout_params_input = TensorCoreFP8Layout.Params(scale=scale_input, orig_dtype=input_dtype, orig_shape=tuple(input_fp8.shape))
-        quantized_input = QuantizedTensor(input_fp8, TensorCoreFP8Layout, layout_params_input)
+        quantized_input = QuantizedTensor(input_fp8, "TensorCoreFP8Layout", layout_params_input)
         # Wrap weight in QuantizedTensor - this enables unified dispatch
         # Call F.linear - __torch_dispatch__ routes to fp8_linear handler in quant_ops.py!
         layout_params_weight = TensorCoreFP8Layout.Params(scale=scale_weight, orig_dtype=input_dtype, orig_shape=tuple(w.shape))
-        quantized_weight = QuantizedTensor(w, TensorCoreFP8Layout, layout_params_weight)
+        quantized_weight = QuantizedTensor(w, "TensorCoreFP8Layout", layout_params_weight)
         o = torch.nn.functional.linear(quantized_input, quantized_weight, bias)
         uncast_bias_weight(self, w, bias, offload_stream)
@@ -493,11 +493,12 @@ from .quant_ops import (
 )

-def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_precision_mm=False):
+def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_precision_mm=False, disabled=[]):
     class MixedPrecisionOps(manual_cast):
         _quant_config = quant_config
         _compute_dtype = compute_dtype
         _full_precision_mm = full_precision_mm
+        _disabled = disabled

         class Linear(torch.nn.Module, CastWeightBiasOp):
             def __init__(
@@ -522,6 +523,7 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec
                 self.tensor_class = None
                 self._full_precision_mm = MixedPrecisionOps._full_precision_mm
+                self._full_precision_mm_config = False

             def reset_parameters(self):
                 return None
@@ -556,8 +558,12 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec
                     self.weight = torch.nn.Parameter(weight.to(device=device, dtype=MixedPrecisionOps._compute_dtype), requires_grad=False)
                 else:
                     self.quant_format = layer_conf.get("format", None)
+                    self._full_precision_mm_config = layer_conf.get("full_precision_matrix_mult", False)
                     if not self._full_precision_mm:
-                        self._full_precision_mm = layer_conf.get("full_precision_matrix_mult", False)
+                        self._full_precision_mm = self._full_precision_mm_config
+
+                    if self.quant_format in MixedPrecisionOps._disabled:
+                        self._full_precision_mm = True

                     if self.quant_format is None:
                         raise ValueError(f"Unknown quantization format for layer {layer_name}")
@@ -630,7 +636,7 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec
                 sd["{}weight_scale".format(prefix)] = self.weight._params.block_scale
             quant_conf = {"format": self.quant_format}
-            if self._full_precision_mm:
+            if self._full_precision_mm_config:
                 quant_conf["full_precision_matrix_mult"] = True
             sd["{}comfy_quant".format(prefix)] = torch.tensor(list(json.dumps(quant_conf).encode('utf-8')), dtype=torch.uint8)
             return sd
@@ -711,10 +717,17 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec
 def pick_operations(weight_dtype, compute_dtype, load_device=None, disable_fast_fp8=False, fp8_optimizations=False, model_config=None):
     fp8_compute = comfy.model_management.supports_fp8_compute(load_device) # TODO: if we support more ops this needs to be more granular
+    nvfp4_compute = comfy.model_management.supports_nvfp4_compute(load_device)

     if model_config and hasattr(model_config, 'quant_config') and model_config.quant_config:
         logging.info("Using mixed precision operations")
-        return mixed_precision_ops(model_config.quant_config, compute_dtype, full_precision_mm=not fp8_compute)
+        disabled = set()
+        if not nvfp4_compute:
+            disabled.add("nvfp4")
+        if not fp8_compute:
+            disabled.add("float8_e4m3fn")
+            disabled.add("float8_e5m2")
+        return mixed_precision_ops(model_config.quant_config, compute_dtype, disabled=disabled)

     if (
         fp8_compute and
View File

@@ -13,6 +13,13 @@ try:
         get_layout_class,
     )
     _CK_AVAILABLE = True
+    if torch.version.cuda is None:
+        ck.registry.disable("cuda")
+    else:
+        cuda_version = tuple(map(int, str(torch.version.cuda).split('.')))
+        if cuda_version < (13,):
+            ck.registry.disable("cuda")
+            ck.registry.disable("triton")

     for k, v in ck.list_backends().items():
         logging.info(f"Found comfy_kitchen backend {k}: {v}")

View File

@@ -13,7 +13,9 @@ from comfy_api_nodes.util import (
     poll_op,
     sync_op,
     tensor_to_base64_string,
+    upload_video_to_comfyapi,
     validate_audio_duration,
+    validate_video_duration,
 )
@@ -41,6 +43,12 @@ class Image2VideoInputField(BaseModel):
     audio_url: str | None = Field(None)

+
+class Reference2VideoInputField(BaseModel):
+    prompt: str = Field(...)
+    negative_prompt: str | None = Field(None)
+    reference_video_urls: list[str] = Field(...)
+
 class Txt2ImageParametersField(BaseModel):
     size: str = Field(...)
     n: int = Field(1, description="Number of images to generate.") # we support only value=1
@@ -76,6 +84,14 @@ class Image2VideoParametersField(BaseModel):
     shot_type: str = Field("single")

+
+class Reference2VideoParametersField(BaseModel):
+    size: str = Field(...)
+    duration: int = Field(5, ge=5, le=15)
+    shot_type: str = Field("single")
+    seed: int = Field(..., ge=0, le=2147483647)
+    watermark: bool = Field(False)
+
 class Text2ImageTaskCreationRequest(BaseModel):
     model: str = Field(...)
     input: Text2ImageInputField = Field(...)
@@ -100,6 +116,12 @@ class Image2VideoTaskCreationRequest(BaseModel):
     parameters: Image2VideoParametersField = Field(...)

+
+class Reference2VideoTaskCreationRequest(BaseModel):
+    model: str = Field(...)
+    input: Reference2VideoInputField = Field(...)
+    parameters: Reference2VideoParametersField = Field(...)
+
 class TaskCreationOutputField(BaseModel):
     task_id: str = Field(...)
     task_status: str = Field(...)
@@ -721,6 +743,143 @@ class WanImageToVideoApi(IO.ComfyNode):
         return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))

+
+class WanReferenceVideoApi(IO.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return IO.Schema(
+            node_id="WanReferenceVideoApi",
+            display_name="Wan Reference to Video",
+            category="api node/video/Wan",
+            description="Use the character and voice from input videos, combined with a prompt, "
+            "to generate a new video that maintains character consistency.",
+            inputs=[
+                IO.Combo.Input("model", options=["wan2.6-r2v"]),
+                IO.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt describing the elements and visual features. Supports English and Chinese. "
+                    "Use identifiers such as `character1` and `character2` to refer to the reference characters.",
+                ),
+                IO.String.Input(
+                    "negative_prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Negative prompt describing what to avoid.",
+                ),
+                IO.Autogrow.Input(
+                    "reference_videos",
+                    template=IO.Autogrow.TemplateNames(
+                        IO.Video.Input("reference_video"),
+                        names=["character1", "character2", "character3"],
+                        min=1,
+                    ),
+                ),
+                IO.Combo.Input(
+                    "size",
+                    options=[
+                        "720p: 1:1 (960x960)",
+                        "720p: 16:9 (1280x720)",
+                        "720p: 9:16 (720x1280)",
+                        "720p: 4:3 (1088x832)",
+                        "720p: 3:4 (832x1088)",
+                        "1080p: 1:1 (1440x1440)",
+                        "1080p: 16:9 (1920x1080)",
+                        "1080p: 9:16 (1080x1920)",
+                        "1080p: 4:3 (1632x1248)",
+                        "1080p: 3:4 (1248x1632)",
+                    ],
+                ),
+                IO.Int.Input(
+                    "duration",
+                    default=5,
+                    min=5,
+                    max=10,
+                    step=5,
+                    display_mode=IO.NumberDisplay.slider,
+                ),
+                IO.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=2147483647,
+                    step=1,
+                    display_mode=IO.NumberDisplay.number,
+                    control_after_generate=True,
+                ),
+                IO.Combo.Input(
+                    "shot_type",
+                    options=["single", "multi"],
+                    tooltip="Specifies the shot type for the generated video, that is, whether the video is a "
+                    "single continuous shot or multiple shots with cuts.",
+                ),
+                IO.Boolean.Input(
+                    "watermark",
+                    default=False,
+                    tooltip="Whether to add an AI-generated watermark to the result.",
+                ),
+            ],
+            outputs=[
+                IO.Video.Output(),
+            ],
+            hidden=[
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )
+
+    @classmethod
+    async def execute(
+        cls,
+        model: str,
+        prompt: str,
+        negative_prompt: str,
+        reference_videos: IO.Autogrow.Type,
+        size: str,
+        duration: int,
+        seed: int,
+        shot_type: str,
+        watermark: bool,
+    ):
+        reference_video_urls = []
+        for i in reference_videos:
+            validate_video_duration(reference_videos[i], min_duration=2, max_duration=30)
+        for i in reference_videos:
+            reference_video_urls.append(await upload_video_to_comfyapi(cls, reference_videos[i]))
+
+        width, height = RES_IN_PARENS.search(size).groups()
+        initial_response = await sync_op(
+            cls,
+            ApiEndpoint(path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis", method="POST"),
+            response_model=TaskCreationResponse,
+            data=Reference2VideoTaskCreationRequest(
+                model=model,
+                input=Reference2VideoInputField(
+                    prompt=prompt, negative_prompt=negative_prompt, reference_video_urls=reference_video_urls
+                ),
+                parameters=Reference2VideoParametersField(
+                    size=f"{width}*{height}",
+                    duration=duration,
+                    shot_type=shot_type,
+                    watermark=watermark,
+                    seed=seed,
+                ),
+            ),
+        )
+        if not initial_response.output:
+            raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
+
+        response = await poll_op(
+            cls,
+            ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
+            response_model=VideoTaskStatusResponse,
+            status_extractor=lambda x: x.output.task_status,
+            poll_interval=6,
+            max_poll_attempts=280,
+        )
+        return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
+
+
 class WanApiExtension(ComfyExtension):
     @override
     async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@@ -729,6 +888,7 @@ class WanApiExtension(ComfyExtension):
             WanImageToImageApi,
             WanTextToVideoApi,
             WanImageToVideoApi,
+            WanReferenceVideoApi,
         ]
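One detail worth noting from execute(): the size combo labels carry the pixel dimensions in parentheses, and RES_IN_PARENS (defined elsewhere in this module, not shown in the diff) extracts them before the request is built. A hypothetical equivalent of that pattern:

import re

# Hypothetical stand-in for the RES_IN_PARENS pattern used in execute():
# pull width/height out of a label like "720p: 16:9 (1280x720)".
RES_IN_PARENS = re.compile(r"\((\d+)x(\d+)\)")

width, height = RES_IN_PARENS.search("720p: 16:9 (1280x720)").groups()
assert f"{width}*{height}" == "1280*720"  # the "size" format the Wan API expects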

View File

@@ -119,7 +119,7 @@ async def upload_video_to_comfyapi(
         raise ValueError(f"Could not verify video duration from source: {e}") from e

     upload_mime_type = f"video/{container.value.lower()}"
-    filename = f"uploaded_video.{container.value.lower()}"
+    filename = f"{uuid.uuid4()}.{container.value.lower()}"

     # Convert VideoInput to BytesIO using specified container/codec
     video_bytes_io = BytesIO()
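The filename change is a one-liner but matters for correctness: a fixed name like uploaded_video.mp4 can collide when several videos are uploaded for one request, which the new reference-to-video node does. A quick illustration:

import uuid

# Each upload gets a unique name, e.g. "8f4e2a1c-....mp4"
filename = f"{uuid.uuid4()}.mp4"
another = f"{uuid.uuid4()}.mp4"
assert filename != another  # no collision between uploads in the same session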

View File

@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.7.0"
+__version__ = "0.8.0"

View File

@@ -2109,6 +2109,10 @@ EXTENSION_WEB_DIRS = {}

 # Dictionary of successfully loaded module names and associated directories.
 LOADED_MODULE_DIRS = {}

+# Dictionary of import failure reasons keyed by module path.
+# Used to provide diagnostic information in the import summary.
+IMPORT_FAILED_REASONS: dict[str, str] = {}
+
 def get_module_name(module_path: str) -> str:
     """
@@ -2223,6 +2227,9 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom
             logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or NODES_LIST (need one).")
             return False
     except Exception as e:
+        # Capture one-line failure reason for the import summary
+        error_msg = str(e).split('\n')[0][:100]  # First line, max 100 chars
+        IMPORT_FAILED_REASONS[module_path] = f"{type(e).__name__}: {error_msg}"
         logging.warning(traceback.format_exc())
         logging.warning(f"Cannot import {module_path} module for custom nodes: {e}")
         return False
@@ -2270,7 +2277,12 @@ async def init_external_custom_nodes():
         if n[2]:
             import_message = ""
         else:
-            import_message = " (IMPORT FAILED)"
+            # Include failure reason if available
+            reason = IMPORT_FAILED_REASONS.get(n[1], "")
+            if reason:
+                import_message = f" (IMPORT FAILED: {reason})"
+            else:
+                import_message = " (IMPORT FAILED)"
         logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
     logging.info("")

View File

@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.7.0"
+version = "0.8.0"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.10"

View File

@@ -21,7 +21,7 @@ psutil
 alembic
 SQLAlchemy
 av>=14.2.0
-comfy-kitchen>=0.2.2
+comfy-kitchen>=0.2.3

 #non essential dependencies:
 kornia>=0.7.1

View File

View File

@@ -0,0 +1,89 @@
+"""Tests for custom node import failure reason reporting."""
+
+import pytest
+import tempfile
+import os
+import shutil
+from unittest.mock import patch, MagicMock
+import asyncio
+
+
+class TestImportFailureReasons:
+    """Test that import failures include diagnostic information."""
+
+    def test_import_failure_reason_format(self):
+        """Test that failure reason is formatted correctly."""
+        # Simulate the formatting logic
+        exception = ImportError("No module named 'missing_dep'")
+        error_msg = str(exception).split('\n')[0][:100]
+        reason = f"{type(exception).__name__}: {error_msg}"
+        assert reason == "ImportError: No module named 'missing_dep'"
+
+    def test_import_failure_reason_truncation(self):
+        """Test that long error messages are truncated."""
+        long_msg = "a" * 200
+        exception = ValueError(long_msg)
+        error_msg = str(exception).split('\n')[0][:100]
+        reason = f"{type(exception).__name__}: {error_msg}"
+        # Should be truncated to 100 chars for the message part
+        assert len(error_msg) == 100
+        assert reason.startswith("ValueError: ")
+
+    def test_import_failure_reason_multiline(self):
+        """Test that only first line of error is used."""
+        multi_line_msg = "First line\nSecond line\nThird line"
+        exception = RuntimeError(multi_line_msg)
+        error_msg = str(exception).split('\n')[0][:100]
+        reason = f"{type(exception).__name__}: {error_msg}"
+        assert reason == "RuntimeError: First line"
+        assert "Second line" not in reason
+
+    def test_import_failure_reason_various_exceptions(self):
+        """Test formatting for various exception types."""
+        test_cases = [
+            (ModuleNotFoundError("No module named 'foo'"), "ModuleNotFoundError: No module named 'foo'"),
+            (SyntaxError("invalid syntax"), "SyntaxError: invalid syntax"),
+            (AttributeError("'NoneType' object has no attribute 'bar'"), "AttributeError: 'NoneType' object has no attribute 'bar'"),
+            (FileNotFoundError("[Errno 2] No such file"), "FileNotFoundError: [Errno 2] No such file"),
+        ]
+        for exception, expected in test_cases:
+            error_msg = str(exception).split('\n')[0][:100]
+            reason = f"{type(exception).__name__}: {error_msg}"
+            assert reason == expected, f"Failed for {type(exception).__name__}"
+
+
+class TestImportSummaryOutput:
+    """Test the import summary output format."""
+
+    def test_summary_message_with_reason(self):
+        """Test that summary includes reason when available."""
+        reason = "ImportError: No module named 'xyz'"
+        import_message = f" (IMPORT FAILED: {reason})"
+        assert import_message == " (IMPORT FAILED: ImportError: No module named 'xyz')"
+
+    def test_summary_message_without_reason(self):
+        """Test fallback when no reason is available."""
+        reason = ""
+        if reason:
+            import_message = f" (IMPORT FAILED: {reason})"
+        else:
+            import_message = " (IMPORT FAILED)"
+        assert import_message == " (IMPORT FAILED)"
+
+    def test_summary_format_string(self):
+        """Test the full summary line format."""
+        time_taken = 0.05
+        import_message = " (IMPORT FAILED: ImportError: missing module)"
+        module_path = "/path/to/custom_nodes/my_node"
+        summary_line = "{:6.1f} seconds{}: {}".format(time_taken, import_message, module_path)
+        assert "0.1 seconds" in summary_line
+        assert "(IMPORT FAILED: ImportError: missing module)" in summary_line
+        assert module_path in summary_line