diff --git a/comfy/app/frontend_management.py b/comfy/app/frontend_management.py
index 873dce61e..419ed4d02 100644
--- a/comfy/app/frontend_management.py
+++ b/comfy/app/frontend_management.py
@@ -151,7 +151,7 @@ class FrontendManager:
     def get_installed_templates_version(cls) -> str:
         """Get the currently installed workflow templates package version."""
         try:
-            templates_version_str = version("comfyui-workflow-templates")
+            templates_version_str = importlib.metadata.version("comfyui-workflow-templates")
             return templates_version_str
         except Exception:
             return None
diff --git a/comfy/cmd/main.py b/comfy/cmd/main.py
index c5748964e..3585fd3c7 100644
--- a/comfy/cmd/main.py
+++ b/comfy/cmd/main.py
@@ -55,7 +55,7 @@ async def _prompt_worker(q: AbstractPromptQueue, server_instance: server_module.
     if args.cache_lru > 0:
         cache_type = execution.CacheType.LRU
     elif args.cache_none:
-        cache_type = execution.CacheType.DEPENDENCY_AWARE
+        cache_type = execution.CacheType.NONE
 
     e = execution.PromptExecutor(server_instance, cache_type=cache_type, cache_size=args.cache_lru)
     last_gc_collect = 0
diff --git a/comfy/ldm/mmaudio/vae/activations.py b/comfy/ldm/mmaudio/vae/activations.py
index 81aa6ab3c..529c0723b 100644
--- a/comfy/ldm/mmaudio/vae/activations.py
+++ b/comfy/ldm/mmaudio/vae/activations.py
@@ -2,7 +2,8 @@
 # LICENSE is in incl_licenses directory.
 
 import torch
-from torch import nn, sin, pow
+from torch import nn
+from torch import sin, pow  # pylint: disable=no-name-in-module
 from torch.nn import Parameter
 
 from ....model_management import cast_to
diff --git a/comfy/ldm/mmaudio/vae/vae.py b/comfy/ldm/mmaudio/vae/vae.py
index fc8773d69..f294ec4e1 100644
--- a/comfy/ldm/mmaudio/vae/vae.py
+++ b/comfy/ldm/mmaudio/vae/vae.py
@@ -9,6 +9,7 @@
 from .distributions import DiagonalGaussianDistribution
 
 from ....ops import disable_weight_init as ops
+from ....model_management import cast_to
 
 DATA_MEAN_80D = [
     -1.6058, -1.3676, -1.2520, -1.2453, -1.2078, -1.2224, -1.2419, -1.2439, -1.2922, -1.2927,
@@ -126,10 +127,10 @@ class VAE(nn.Module):
         return dec
 
     def normalize(self, x: torch.Tensor) -> torch.Tensor:
-        return (x - comfy.model_management.cast_to(self.data_mean, dtype=x.dtype, device=x.device)) / comfy.model_management.cast_to(self.data_std, dtype=x.dtype, device=x.device)
+        return (x - cast_to(self.data_mean, dtype=x.dtype, device=x.device)) / cast_to(self.data_std, dtype=x.dtype, device=x.device)
 
     def unnormalize(self, x: torch.Tensor) -> torch.Tensor:
-        return x * comfy.model_management.cast_to(self.data_std, dtype=x.dtype, device=x.device) + comfy.model_management.cast_to(self.data_mean, dtype=x.dtype, device=x.device)
+        return x * cast_to(self.data_std, dtype=x.dtype, device=x.device) + cast_to(self.data_mean, dtype=x.dtype, device=x.device)
 
     def forward(
         self,
@@ -142,7 +143,9 @@
         posterior = self.encode(x, normalize=normalize)
 
         if sample_posterior:
-            z = posterior.sample(rng)
+            raise RuntimeError("error in implementation, posterior doesn't accept this arg")
+            # todo: fix this in upstream?
+            # z = posterior.sample(rng)
         else:
             z = posterior.mode()
         dec = self.decode(z, unnormalize=unnormalize)
diff --git a/comfy/model_downloader.py b/comfy/model_downloader.py
index 3231bfaa8..adc5c62b6 100644
--- a/comfy/model_downloader.py
+++ b/comfy/model_downloader.py
@@ -40,6 +40,10 @@ def get_filename_list(folder_name: str) -> Sequence[str]:
     return get_filename_list_with_downloadable(folder_name)
 
 
+def get_folder_paths(*args, **kwargs):
+    return folder_paths.get_folder_paths(*args, **kwargs)
+
+
 def get_filename_list_with_downloadable(folder_name: str, known_files: Optional[List[Downloadable] | KnownDownloadables] = None) -> DownloadableFileList | list[str]:
     if known_files is None:
         known_files = _get_known_models_for_folder_name(folder_name)
diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 50ce5d402..453d812e6 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -156,7 +156,7 @@ class LowVramPatch:
         intermediate_dtype = torch.float32
         out = lora.calculate_weight(self.patches[self.key], weight.to(intermediate_dtype), self.key, intermediate_dtype=intermediate_dtype)
         if self.set_func is None:
-            return float.stochastic_rounding(out, weight.dtype, seed=string_to_seed(self.key))
+            return stochastic_rounding(out, weight.dtype, seed=string_to_seed(self.key))
         else:
             return self.set_func(out, seed=string_to_seed(self.key), return_weight=True)
 
diff --git a/comfy/ops.py b/comfy/ops.py
index 8a5e9681d..b226d2001 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -77,11 +77,11 @@ except Exception as exc_info:
 
 NVIDIA_MEMORY_CONV_BUG_WORKAROUND = False
 try:
-    if comfy.model_management.is_nvidia():
-        if torch.backends.cudnn.version() >= 91002 and comfy.model_management.torch_version_numeric >= (2, 9) and comfy.model_management.torch_version_numeric <= (2, 10):
+    if model_management.is_nvidia():
+        if torch.backends.cudnn.version() >= 91002 and model_management.torch_version_numeric >= (2, 9) and model_management.torch_version_numeric <= (2, 10):
             # TODO: change upper bound version once it's fixed'
             NVIDIA_MEMORY_CONV_BUG_WORKAROUND = True
-            logging.info("working around nvidia conv3d memory bug.")
+            logger.info("working around nvidia conv3d memory bug.")
 except:
     pass
 
diff --git a/comfy/samplers.py b/comfy/samplers.py
index 53762fd12..147ba2895 100755
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -324,7 +324,7 @@ def _calc_cond_batch(model: BaseModel, conds, x_in: torch.Tensor, timestep: torc
                                                                              copy_dict1=False)
 
         if patches is not None:
-            transformer_options["patches"] = comfy.patcher_extension.merge_nested_dicts(
+            transformer_options["patches"] = patcher_extension.merge_nested_dicts(
                 transformer_options.get("patches", {}),
                 patches
             )
diff --git a/comfy/text_encoders/lumina2.py b/comfy/text_encoders/lumina2.py
index 788f87c9c..f4834ce4d 100644
--- a/comfy/text_encoders/lumina2.py
+++ b/comfy/text_encoders/lumina2.py
@@ -1,4 +1,4 @@
-from .llama import Gemma2_2B
+from .llama import Gemma2_2B, Gemma3_4B
 from .spiece_tokenizer import SPieceTokenizer
 from .. import sd1_clip
 
@@ -40,7 +40,7 @@ class Gemma2_2BModel(sd1_clip.SDClipModel):
 
 class Gemma3_4BModel(sd1_clip.SDClipModel):
     def __init__(self, device="cpu", layer="hidden", layer_idx=-2, dtype=None, attention_mask=True, model_options={}):
-        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 2, "pad": 0}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Gemma3_4B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
+        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 2, "pad": 0}, layer_norm_hidden_state=False, model_class=Gemma3_4B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
 
 class LuminaModel(sd1_clip.SD1ClipModel):
     def __init__(self, device="cpu", dtype=None, model_options=None, name="gemma2_2b", clip_model=Gemma2_2BModel):
@@ -51,6 +51,7 @@ class LuminaModel(sd1_clip.SD1ClipModel):
 
 
 def te(dtype_llama=None, llama_scaled_fp8=None, model_type="gemma2_2b"):
+    model = None
     if model_type == "gemma2_2b":
         model = Gemma2_2BModel
     elif model_type == "gemma3_4b":
diff --git a/comfy_extras/nodes/nodes_clip_sdxl.py b/comfy_extras/nodes/nodes_clip_sdxl.py
index 69ad2fb80..b03582017 100644
--- a/comfy_extras/nodes/nodes_clip_sdxl.py
+++ b/comfy_extras/nodes/nodes_clip_sdxl.py
@@ -1,5 +1,5 @@
 from typing_extensions import override
-from comfy.common import MAX_RESOLUTION
+from comfy.nodes.common import MAX_RESOLUTION
 from comfy_api.latest import ComfyExtension, io
 
 
diff --git a/comfy_extras/nodes/nodes_compositing.py b/comfy_extras/nodes/nodes_compositing.py
index 9638b718f..bf2e837fc 100644
--- a/comfy_extras/nodes/nodes_compositing.py
+++ b/comfy_extras/nodes/nodes_compositing.py
@@ -7,7 +7,7 @@ from skimage import exposure
 import comfy.utils
 from comfy.component_model.tensor_types import RGBImageBatch, ImageBatch, MaskBatch
 from comfy.nodes.package_typing import CustomNode
-
+from comfy_api.latest import io
 
 def resize_mask(mask, shape):
     return torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[0], shape[1]), mode="bilinear").squeeze(1)
diff --git a/comfy_extras/nodes/nodes_flux.py b/comfy_extras/nodes/nodes_flux.py
index 0b50bac68..78a491286 100644
--- a/comfy_extras/nodes/nodes_flux.py
+++ b/comfy_extras/nodes/nodes_flux.py
@@ -29,7 +29,8 @@ class CLIPTextEncodeFlux(io.ComfyNode):
         tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]
 
         return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"guidance": guidance}))
-    encode = execute # TODO: remove
+
+    encode = execute  # TODO: remove
 
 
 class FluxGuidance(io.ComfyNode):
@@ -75,7 +76,8 @@ class FluxDisableGuidance(io.ComfyNode):
     def execute(cls, conditioning) -> io.NodeOutput:
         c = node_helpers.conditioning_set_values(conditioning, {"guidance": None})
         return io.NodeOutput(c)
-    append = execute # TODO: remove
+
+    append = execute  # TODO: remove
 
 
 class FluxKontextImageScale(io.ComfyNode):
diff --git a/comfy_extras/nodes/nodes_lt.py b/comfy_extras/nodes/nodes_lt.py
index b60efc2e4..6acf185c2 100644
--- a/comfy_extras/nodes/nodes_lt.py
+++ b/comfy_extras/nodes/nodes_lt.py
@@ -4,6 +4,8 @@
 import av
 import numpy as np
 import torch
+from io import BytesIO
+
 import comfy.model_management
 import comfy.model_sampling
 import comfy.utils
@@ -11,6 +13,7 @@ from comfy import node_helpers
 from comfy.ldm.lightricks.symmetric_patchifier import SymmetricPatchifier, latent_to_pixel_coords
 from comfy.nodes import base_nodes as nodes
 from comfy_api.latest import ComfyExtension, io
+from typing_extensions import override
 
 class EmptyLTXVLatentVideo(io.ComfyNode):
     @classmethod