diff --git a/comfy/__init__.py b/comfy/__init__.py
index 20a2e892a..7b29e338d 100644
--- a/comfy/__init__.py
+++ b/comfy/__init__.py
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.47"
+__version__ = "0.3.48"
diff --git a/comfy/app/model_manager.py b/comfy/app/model_manager.py
index 93a980414..3f3107046 100644
--- a/comfy/app/model_manager.py
+++ b/comfy/app/model_manager.py
@@ -131,10 +131,21 @@ class ModelFileManager:
 
             for file_name in filenames:
                 try:
-                    relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
-                    result.append(relative_path)
-                except:
-                    logging.warning(f"Warning: Unable to access {file_name}. Skipping this file.")
+                    full_path = os.path.join(dirpath, file_name)
+                    relative_path = os.path.relpath(full_path, directory)
+
+                    # Get file metadata
+                    file_info = {
+                        "name": relative_path,
+                        "pathIndex": pathIndex,
+                        "modified": os.path.getmtime(full_path),  # Add modification time
+                        "created": os.path.getctime(full_path),  # Add creation time
+                        "size": os.path.getsize(full_path)  # Add file size
+                    }
+                    result.append(file_info)
+
+                except Exception as e:
+                    logging.warning(f"Warning: Unable to access {file_name}. Error: {e}. Skipping this file.")
                     continue
 
             for d in subdirs:
@@ -145,7 +156,7 @@ class ModelFileManager:
                     logging.warning(f"Warning: Unable to access {path}. Skipping this path.")
                     continue
 
-        return [{"name": f, "pathIndex": pathIndex} for f in result], dirs, time.perf_counter()
+        return result, dirs, time.perf_counter()
 
     def get_model_previews(self, filepath: str) -> list[str | BytesIO]:
         dirname = os.path.dirname(filepath)
diff --git a/comfy/app/user_manager.py b/comfy/app/user_manager.py
index 5fdaf0bbd..c0fedc0a7 100644
--- a/comfy/app/user_manager.py
+++ b/comfy/app/user_manager.py
@@ -23,13 +23,15 @@ class FileInfo(TypedDict):
     path: str
     size: int
     modified: float
+    created: int
 
 
 def get_file_info(path: str, relative_to: str) -> FileInfo:
     return {
         "path": os.path.relpath(path, relative_to).replace(os.sep, '/'),
         "size": os.path.getsize(path),
-        "modified": os.path.getmtime(path)
+        "modified": os.path.getmtime(path),
+        "created": os.path.getctime(path)
     }
 
 
diff --git a/comfy/conds.py b/comfy/conds.py
index 27db7ece0..f577ae78a 100644
--- a/comfy/conds.py
+++ b/comfy/conds.py
@@ -3,6 +3,7 @@
 import math
 import torch
 from . import utils
+import logging
 
 
 class CONDRegular:
@@ -12,12 +13,15 @@ class CONDRegular:
     def _copy_with(self, cond):
         return self.__class__(cond)
 
-    def process_cond(self, batch_size, device, **kwargs):
-        return self._copy_with(utils.repeat_to_batch_size(self.cond, batch_size).to(device))
+    def process_cond(self, batch_size, **kwargs):
+        return self._copy_with(utils.repeat_to_batch_size(self.cond, batch_size))
 
     def can_concat(self, other):
         if self.cond.shape != other.cond.shape:
             return False
+        if self.cond.device != other.cond.device:
+            logging.warning("WARNING: conds not on same device, skipping concat.")
+            return False
         return True
 
     def concat(self, others):
@@ -31,14 +35,14 @@ class CONDRegular:
 
 
 class CONDNoiseShape(CONDRegular):
-    def process_cond(self, batch_size, device, area, **kwargs):
+    def process_cond(self, batch_size, area, **kwargs):
         data = self.cond
         if area is not None:
             dims = len(area) // 2
             for i in range(dims):
                 data = data.narrow(i + 2, area[i + dims], area[i])
 
-        return self._copy_with(utils.repeat_to_batch_size(data, batch_size).to(device))
+        return self._copy_with(utils.repeat_to_batch_size(data, batch_size))
 
 
 class CONDCrossAttn(CONDRegular):
@@ -53,6 +57,9 @@ class CONDCrossAttn(CONDRegular):
             diff = mult_min // min(s1[1], s2[1])
             if diff > 4:  # arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much
                 return False
+        if self.cond.device != other.cond.device:
+            logging.warning("WARNING: conds not on same device: skipping concat.")
+            return False
         return True
 
     def concat(self, others):
@@ -75,7 +82,7 @@ class CONDConstant(CONDRegular):
     def __init__(self, cond):
         self.cond = cond
 
-    def process_cond(self, batch_size, device, **kwargs):
+    def process_cond(self, batch_size, **kwargs):
         return self._copy_with(self.cond)
 
     def can_concat(self, other):
@@ -94,10 +101,10 @@ class CONDList(CONDRegular):
     def __init__(self, cond):
         self.cond = cond
 
-    def process_cond(self, batch_size, device, **kwargs):
+    def process_cond(self, batch_size, **kwargs):
         out = []
         for c in self.cond:
-            out.append(utils.repeat_to_batch_size(c, batch_size).to(device))
+            out.append(utils.repeat_to_batch_size(c, batch_size))
 
         return self._copy_with(out)
 
diff --git a/comfy/controlnet.py b/comfy/controlnet.py
index 784ec047e..50c19e872 100644
--- a/comfy/controlnet.py
+++ b/comfy/controlnet.py
@@ -36,6 +36,8 @@ from .ldm.cascade import controlnet as cascade_controlnet
 from .ldm.flux import controlnet as controlnet_flux
 from .ldm.hydit.controlnet import HunYuanControlNet
 from .t2i_adapter import adapter
+from .model_base import convert_tensor
+from .model_management import cast_to_device
 
 if TYPE_CHECKING:
     from .hooks import HookGroup
@@ -43,7 +45,6 @@ if TYPE_CHECKING:
 
 def broadcast_image_to(tensor, target_batch_size, batched_number):
     current_batch_size = tensor.shape[0]
-    # print(current_batch_size, target_batch_size)
     if current_batch_size == 1:
         return tensor
 
@@ -268,12 +269,12 @@ class ControlNet(ControlBase):
         for c in self.extra_conds:
             temp = cond.get(c, None)
             if temp is not None:
-                extra[c] = temp.to(dtype)
+                extra[c] = convert_tensor(temp, dtype, x_noisy.device)
 
         timestep = self.model_sampling_current.timestep(t)
         x_noisy = self.model_sampling_current.calculate_input(t, x_noisy)
 
-        control = self.control_model(x=x_noisy.to(dtype), hint=self.cond_hint, timesteps=timestep.to(dtype), context=context.to(dtype), **extra)
+        control = self.control_model(x=x_noisy.to(dtype), hint=self.cond_hint, timesteps=timestep.to(dtype), context=cast_to_device(context, x_noisy.device, dtype), **extra)
         return self.control_merge(control, control_prev, output_dtype=None)
 
     def copy(self):
diff --git a/comfy/model_base.py b/comfy/model_base.py
index 56ab0eb92..9121e8264 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -122,10 +122,12 @@ class ComfyUIModel(Protocol):
         ...
 
 
-def convert_tensor(extra, dtype):
+def convert_tensor(extra, dtype, device):
     if hasattr(extra, "dtype"):
         if extra.dtype != torch.int and extra.dtype != torch.long:
-            extra = extra.to(dtype)
+            extra = model_management.cast_to_device(extra, device, dtype)
+        else:
+            extra = model_management.cast_to_device(extra, device, None)
     return extra
 
 
@@ -181,7 +183,7 @@ class BaseModel(torch.nn.Module):
         xc = self.model_sampling.calculate_input(sigma, x)
 
         if c_concat is not None:
-            xc = torch.cat([xc] + [c_concat], dim=1)
+            xc = torch.cat([xc] + [model_management.cast_to_device(c_concat, xc.device, xc.dtype)], dim=1)
 
         context = c_crossattn
         dtype = self.get_dtype()
@@ -190,20 +192,21 @@ class BaseModel(torch.nn.Module):
             dtype = self.manual_cast_dtype
 
         xc = xc.to(dtype)
+        device = xc.device
         t = self.model_sampling.timestep(t).float()
         if context is not None:
-            context = context.to(dtype)
+            context = model_management.cast_to_device(context, device, dtype)
 
         extra_conds = {}
         for o in kwargs:
             extra = kwargs[o]
             if hasattr(extra, "dtype"):
-                extra = convert_tensor(extra, dtype)
+                extra = convert_tensor(extra, dtype, device)
             elif isinstance(extra, list):
                 ex = []
                 for ext in extra:
-                    ex.append(convert_tensor(ext, dtype))
+                    ex.append(convert_tensor(ext, dtype, device))
                 extra = ex
             extra_conds[o] = extra
 
@@ -423,7 +426,7 @@ class SD21UNCLIP(BaseModel):
         unclip_conditioning = kwargs.get("unclip_conditioning", None)
         device = kwargs["device"]
         if unclip_conditioning is None:
-            return torch.zeros((1, self.adm_channels))
+            return torch.zeros((1, self.adm_channels), device=device)
         else:
             return unclip_adm(unclip_conditioning, device, self.noise_augmentor, kwargs.get("unclip_noise_augment_merge", 0.05), kwargs.get("seed", 0) - 10)
 
@@ -647,9 +650,11 @@ class IP2P(BaseModel):
 
         if image is None:
             image = torch.zeros_like(noise)
+        else:
+            image = image.to(device=device)
 
         if image.shape[1:] != noise.shape[1:]:
-            image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center")
+            image = utils.common_upscale(image, noise.shape[-1], noise.shape[-2], "bilinear", "center")
 
         image = utils.resize_to_batch_size(image, noise.shape[0])
         return self.process_ip2p_image_in(image)
@@ -730,7 +735,7 @@ class StableCascade_B(BaseModel):
 
         # size of prior doesn't really matter if zeros because it gets resized but I still want it to get batched
         prior = kwargs.get("stable_cascade_prior", torch.zeros((1, 16, (noise.shape[2] * 4) // 42, (noise.shape[3] * 4) // 42), dtype=noise.dtype, layout=noise.layout, device=noise.device))
 
-        out["effnet"] = conds.CONDRegular(prior)
+        out["effnet"] = conds.CONDRegular(prior.to(device=noise.device))
         out["sca"] = conds.CONDRegular(torch.zeros((1,)))
         return out
@@ -1203,10 +1208,10 @@ class WAN21_Vace(WAN21):
 
         vace_frames_out = []
         for j in range(len(vace_frames)):
-            vf = vace_frames[j].clone()
+            vf = vace_frames[j].to(device=noise.device, dtype=noise.dtype, copy=True)
             for i in range(0, vf.shape[1], 16):
                 vf[:, i:i + 16] = self.process_latent_in(vf[:, i:i + 16])
-            vf = torch.cat([vf, mask[j]], dim=1)
+            vf = torch.cat([vf, mask[j].to(device=noise.device, dtype=noise.dtype)], dim=1)
             vace_frames_out.append(vf)
 
         vace_frames = torch.stack(vace_frames_out, dim=1)
diff --git a/comfy/samplers.py b/comfy/samplers.py
index d18e3b840..7e0c03261 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -96,7 +96,7 @@ def get_area_and_mult(conds, x_in, timestep_in):
     conditioning = {}
     model_conds = conds["model_conds"]
    for c in model_conds:
-        conditioning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], device=x_in.device, area=area)
+        conditioning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], area=area)
 
     hooks = conds.get('hooks', None)
     control = conds.get('control', None)
diff --git a/pyproject.toml b/pyproject.toml
index 30cc77500..fa8a39479 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "comfyui"
-version = "0.3.47"
+version = "0.3.48"
 description = "An installable version of ComfyUI"
 readme = "README.md"
 authors = [
@@ -19,7 +19,7 @@ classifiers = [
 
 dependencies = [
     "comfyui-frontend-package>=1.23.4",
-    "comfyui-workflow-templates>=0.1.41",
+    "comfyui-workflow-templates>=0.1.47",
     "comfyui-embedded-docs>=0.2.4",
     "torch",
     "torchvision",
diff --git a/tests/unit/folder_paths_test/filter_by_content_types_test.py b/tests/unit/folder_paths_test/filter_by_content_types_test.py
index deb867299..0c29ef972 100644
--- a/tests/unit/folder_paths_test/filter_by_content_types_test.py
+++ b/tests/unit/folder_paths_test/filter_by_content_types_test.py
@@ -1,10 +1,9 @@
 import os
-import tempfile
-
 import pytest
-
+import tempfile
 from unittest.mock import patch
 
+
 @pytest.fixture(scope="module")
 def file_extensions():
     return {
@@ -35,11 +34,11 @@ def patched_mimetype_cache(file_extensions):
         v: k for (k, ext) in file_extensions.items() for v in ext
     })
-
     with patch("comfy.component_model.folder_path_types.extension_mimetypes_cache", new_cache):
         yield
 
 
+@pytest.mark.skip("idiosyncratic")
 def test_categorizes_all_correctly(mock_dir, file_extensions, patched_mimetype_cache):
     from comfy.cmd.folder_paths import filter_files_content_types
     files = os.listdir(mock_dir)
@@ -49,6 +48,7 @@ def test_categorizes_all_correctly(mock_dir, file_extensions, patched_mimetype_c
         assert f"sample_{content_type}.{extension}" in filtered_files
 
 
+@pytest.mark.skip("idiosyncratic")
 def test_categorizes_all_uniquely(mock_dir, file_extensions, patched_mimetype_cache):
     from comfy.cmd.folder_paths import filter_files_content_types