mirror of https://github.com/comfyanonymous/ComfyUI.git
synced 2025-12-23 13:00:54 +08:00
Added new feature/fix for ComfyUI: guard cast_to against stream objects without __enter__, add video TAE support to VAELoader, optionally gate custom nodes through the manager, register new CLIP/DualCLIP types, and add a stream repro script
This commit is contained in:
parent
95acb06959
commit
460e9e0d8a
comfy/model_management.py
@@ -1079,11 +1079,15 @@ def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False, stream=None):
             if dtype is None or weight.dtype == dtype:
                 return weight
         if stream is not None:
+            if not hasattr(stream, "__enter__"):
+                logging.error(f"Stream object {stream} of type {type(stream)} does not have __enter__ method")
             with stream:
                 return weight.to(dtype=dtype, copy=copy)
         return weight.to(dtype=dtype, copy=copy)
 
     if stream is not None:
+        if not hasattr(stream, "__enter__"):
+            logging.error(f"Stream object {stream} of type {type(stream)} does not have __enter__ method")
         with stream:
             r = torch.empty_like(weight, dtype=dtype, device=device)
             r.copy_(weight, non_blocking=non_blocking)
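The guard above only logs and still enters the `with` block, so a stream lacking __enter__ will raise right after the diagnostic is emitted. A minimal standalone sketch of the same pattern (cast_with_stream and FakeStream are illustrative names, not part of the patch):

    import logging

    def cast_with_stream(value, stream=None):
        # Mirrors the patched cast_to: log a diagnostic if the stream is not a
        # context manager, then enter it anyway so the original failure surfaces.
        if stream is not None:
            if not hasattr(stream, "__enter__"):
                logging.error(f"Stream object {stream} of type {type(stream)} does not have __enter__ method")
            with stream:
                return value
        return value

    class FakeStream:
        # Stand-in for torch.cuda.Stream on builds where it supports `with`.
        def __enter__(self):
            return self
        def __exit__(self, *exc):
            return False

    print(cast_with_stream(42, FakeStream()))  # 42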
@@ -1525,33 +1529,6 @@ def throw_exception_if_processing_interrupted():
         if interrupt_processing:
             interrupt_processing = False
             raise InterruptProcessingException()
-"""
-def unload_all_models_full():
-    """
-    Completely unloads all models from RAM and GPU when the user cancels.
-    Frees CPU RAM, GPU VRAM, and clears python references.
-    """
-    global current_loaded_models
-    try:
-        # Unload every model object
-        for m in current_loaded_models:
-            try:
-                m.model_unload(memory_to_free=None, unpatch_weights=True)
-            except:
-                pass
-        current_loaded_models.clear()
-
-        # Force Python GC
-        gc.collect()
-
-        # Clear GPU memory
-        soft_empty_cache(force=True)
-
-        logging.info("All models unloaded successfully (manual full unload).")
-    except Exception as e:
-        logging.warning(f"Model unload warning: {e}")
-"""
-
 def cleanup_ram():
     gc.collect()
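The deleted block was already dead code: the entire unload_all_models_full helper sat inside a module-level triple-quoted string, so removing it changes no runtime behavior; cleanup_ram is untouched.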
nodes.py (35 lines changed)
@@ -43,6 +43,9 @@ import folder_paths
 import latent_preview
 import node_helpers
 
+if getattr(args, "enable_manager", False):
+    import comfyui_manager
+
 def before_node_execution():
     comfy.model_management.throw_exception_if_processing_interrupted()
 
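The import gate above keys off an optional enable_manager flag, so startup still succeeds when the manager package is absent or the flag was never registered. A hedged sketch of the same pattern (the module name comfyui_manager comes from this patch; the argparse wiring is illustrative):

    import argparse
    import importlib

    def maybe_import_manager(args):
        # getattr with a default keeps this safe even if the CLI flag was
        # never added to the namespace.
        if getattr(args, "enable_manager", False):
            return importlib.import_module("comfyui_manager")
        return None

    args = argparse.Namespace()  # no enable_manager attribute at all
    print(maybe_import_manager(args))  # None, and no import is attempted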
@@ -692,8 +695,10 @@ class LoraLoaderModelOnly(LoraLoader):
         return (self.load_lora(model, None, lora_name, strength_model, 0)[0],)
 
 class VAELoader:
+    video_taes = ["taehv", "lighttaew2_2", "lighttaew2_1", "lighttaehy1_5"]
+    image_taes = ["taesd", "taesdxl", "taesd3", "taef1"]
     @staticmethod
-    def vae_list():
+    def vae_list(s):
         vaes = folder_paths.get_filename_list("vae")
         approx_vaes = folder_paths.get_filename_list("vae_approx")
         sdxl_taesd_enc = False
@@ -722,6 +727,11 @@ class VAELoader:
                 f1_taesd_dec = True
             elif v.startswith("taef1_decoder."):
                 f1_taesd_enc = True
+            else:
+                for tae in s.video_taes:
+                    if v.startswith(tae):
+                        vaes.append(v)
+
         if sd1_taesd_dec and sd1_taesd_enc:
             vaes.append("taesd")
         if sdxl_taesd_dec and sdxl_taesd_enc:
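The new else branch lists any vae_approx file whose name starts with one of the video TAE prefixes. A standalone sketch with made-up filenames (the prefix list is the one the patch defines on VAELoader):

    video_taes = ["taehv", "lighttaew2_2", "lighttaew2_1", "lighttaehy1_5"]

    def collect_video_taes(approx_vaes):
        # Keep any filename that begins with a known video TAE prefix.
        vaes = []
        for v in approx_vaes:
            for tae in video_taes:
                if v.startswith(tae):
                    vaes.append(v)
        return vaes

    print(collect_video_taes(["taehv_v1.safetensors", "taesd_decoder.pth"]))
    # ['taehv_v1.safetensors']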
@@ -765,7 +775,7 @@ class VAELoader:
 
     @classmethod
     def INPUT_TYPES(s):
-        return {"required": { "vae_name": (s.vae_list(), )}}
+        return {"required": { "vae_name": (s.vae_list(s), )}}
     RETURN_TYPES = ("VAE",)
     FUNCTION = "load_vae"
 
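Because vae_list stays a @staticmethod after gaining the s parameter, INPUT_TYPES has to pass the class explicitly as s.vae_list(s) so the body can reach class attributes like s.video_taes; a @classmethod would receive the class implicitly. A minimal illustration (Demo is a made-up class):

    class Demo:
        items = ["a", "b"]

        @staticmethod
        def listing(s):
            # s is whatever the caller passes; here, the class itself.
            return s.items

    print(Demo.listing(Demo))  # ['a', 'b']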
@@ -776,10 +786,13 @@ class VAELoader:
         if vae_name == "pixel_space":
             sd = {}
             sd["pixel_space_vae"] = torch.tensor(1.0)
-        elif vae_name in ["taesd", "taesdxl", "taesd3", "taef1"]:
+        elif vae_name in self.image_taes:
             sd = self.load_taesd(vae_name)
         else:
-            vae_path = folder_paths.get_full_path_or_raise("vae", vae_name)
+            if os.path.splitext(vae_name)[0] in self.video_taes:
+                vae_path = folder_paths.get_full_path_or_raise("vae_approx", vae_name)
+            else:
+                vae_path = folder_paths.get_full_path_or_raise("vae", vae_name)
             sd = comfy.utils.load_torch_file(vae_path)
         vae = comfy.sd.VAE(sd=sd)
         vae.throw_exception_if_invalid()
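Note the asymmetry with vae_list: listing matches video TAEs by filename prefix, while load_vae requires the exact stem to be in video_taes, so a file like taehv_v1.safetensors would be listed but then resolved against the regular "vae" folder. A standalone sketch of the routing as written (folder lookup stubbed; the real code calls folder_paths.get_full_path_or_raise):

    import os

    video_taes = ["taehv", "lighttaew2_2", "lighttaew2_1", "lighttaehy1_5"]

    def pick_folder(vae_name):
        # Exact-stem match, mirroring the patched load_vae branch.
        if os.path.splitext(vae_name)[0] in video_taes:
            return "vae_approx"
        return "vae"

    print(pick_folder("taehv.safetensors"))     # vae_approx
    print(pick_folder("taehv_v1.safetensors"))  # vae (prefix does not count here)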
@@ -929,7 +942,7 @@ class CLIPLoader:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ),
-                              "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2"], ),
+                              "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2", "ovis"], ),
                               },
                 "optional": {
                               "device": (["default", "cpu"], {"advanced": True}),
@@ -957,7 +970,7 @@ class DualCLIPLoader:
     def INPUT_TYPES(s):
         return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ),
                               "clip_name2": (folder_paths.get_filename_list("text_encoders"), ),
-                              "type": (["sdxl", "sd3", "flux", "hunyuan_video", "hidream", "hunyuan_image", "hunyuan_video_15"], ),
+                              "type": (["sdxl", "sd3", "flux", "hunyuan_video", "hidream", "hunyuan_image", "hunyuan_video_15", "kandinsky5", "kandinsky5_image"], ),
                               },
                 "optional": {
                               "device": (["default", "cpu"], {"advanced": True}),
@@ -2233,6 +2246,12 @@ async def init_external_custom_nodes():
             if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes:
                 logging.info(f"Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes")
                 continue
 
+            if getattr(args, "enable_manager", False):
+                if comfyui_manager.should_be_disabled(module_path):
+                    logging.info(f"Blocked by policy: {module_path}")
+                    continue
+
             time_before = time.perf_counter()
             success = await load_custom_node(module_path, base_node_names, module_parent="custom_nodes")
             node_import_times.append((time.perf_counter() - time_before, module_path, success))
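The new block consults the manager before importing each external node pack. A hedged sketch of the control flow (comfyui_manager.should_be_disabled is the patch's API; the stub predicate below is illustrative):

    import logging

    def should_skip(module_path, enable_manager, should_be_disabled):
        # True when the manager is active and its policy blocks this pack.
        if enable_manager and should_be_disabled(module_path):
            logging.info(f"Blocked by policy: {module_path}")
            return True
        return False

    blocked = {"custom_nodes/bad_pack"}
    print(should_skip("custom_nodes/bad_pack", True, blocked.__contains__))   # True
    print(should_skip("custom_nodes/good_pack", True, blocked.__contains__))  # False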
@@ -2336,7 +2355,9 @@ async def init_builtin_extra_nodes():
         "nodes_easycache.py",
         "nodes_audio_encoder.py",
         "nodes_rope.py",
+        "nodes_logic.py",
         "nodes_nop.py",
+        "nodes_kandinsky5.py",
     ]
 
     import_failed = []
@@ -2428,4 +2449,4 @@ async def init_extra_nodes(init_custom_nodes=True, init_api_nodes=True):
         logging.warning("Please do a: pip install -r requirements.txt")
         logging.warning("")
 
     return import_failed
reproduce_stream_error.py (new file, 27 lines)
@@ -0,0 +1,27 @@
+import torch
+import logging
+
+logging.basicConfig(level=logging.INFO)
+
+def test_stream():
+    if not torch.cuda.is_available():
+        print("CUDA not available, cannot test cuda stream")
+        return
+
+    device = torch.device("cuda")
+    stream = torch.cuda.Stream(device=device, priority=0)
+
+    print(f"Stream type: {type(stream)}")
+    print(f"Has __enter__: {hasattr(stream, '__enter__')}")
+
+    try:
+        with stream:
+            print("Stream context manager works")
+    except AttributeError as e:
+        print(f"AttributeError caught: {e}")
+    except Exception as e:
+        print(f"Other exception caught: {e}")
+
+if __name__ == "__main__":
+    test_stream()
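Assuming the script sits at the repository root, it can be run directly with python reproduce_stream_error.py. On a CUDA build of PyTorch where torch.cuda.Stream implements the context-manager protocol it reports that the context manager works; on the affected setups it surfaces the AttributeError that the patched cast_to above now logs before failing.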