From 2e49b5582ff1f5812eec5526a1fe13f8fe0137cf Mon Sep 17 00:00:00 2001
From: XiaoJiJi
Date: Thu, 19 Mar 2026 03:41:50 +0000
Subject: [PATCH] fix: replace bare except with specific exception types

---
 app/model_manager.py               | 3 ++-
 comfy/ops.py                       | 2 +-
 comfy_extras/nodes_freelunch.py    | 8 ++++----
 comfy_extras/nodes_hypernetwork.py | 2 +-
 comfy_extras/nodes_lora_extract.py | 4 ++--
 5 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/app/model_manager.py b/app/model_manager.py
index f124d1117..113ef32aa 100644
--- a/app/model_manager.py
+++ b/app/model_manager.py
@@ -73,7 +73,8 @@ class ModelFileManager:
                     img.save(img_bytes, format="WEBP")
                     img_bytes.seek(0)
                     return web.Response(body=img_bytes.getvalue(), content_type="image/webp")
-            except:
+            except (IOError, OSError, ValueError, Image.DecompressionBombError) as e:
+                logging.warning(f"Failed to load preview image {default_preview}: {e}")
                 return web.Response(status=404)
 
     def get_model_file_list(self, folder_name: str):
diff --git a/comfy/ops.py b/comfy/ops.py
index 87b36b5c5..dfd4f4988 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -70,7 +70,7 @@ try: #TODO: change upper bound version once it's fixed'
             NVIDIA_MEMORY_CONV_BUG_WORKAROUND = True
             logging.info("working around nvidia conv3d memory bug.")
-except:
+except (RuntimeError, AttributeError, TypeError):
     pass
 
 cast_to = comfy.model_management.cast_to #TODO: remove once no more references
diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py
index 248efdef3..1b3cc363f 100644
--- a/comfy_extras/nodes_freelunch.py
+++ b/comfy_extras/nodes_freelunch.py
@@ -55,8 +55,8 @@ class FreeU(IO.ComfyNode):
                 if hsp.device not in on_cpu_devices:
                     try:
                         hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
-                    except:
-                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
+                    except (RuntimeError, TypeError) as e:
+                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU. Error: {}".format(hsp.device, e))
                         on_cpu_devices[hsp.device] = True
                         hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
                 else:
@@ -109,8 +109,8 @@ class FreeU_V2(IO.ComfyNode):
                 if hsp.device not in on_cpu_devices:
                     try:
                         hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
-                    except:
-                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
+                    except (RuntimeError, TypeError) as e:
+                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU. Error: {}".format(hsp.device, e))
                         on_cpu_devices[hsp.device] = True
                         hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
                 else:
diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py
index 2a6a87a81..8b247c470 100644
--- a/comfy_extras/nodes_hypernetwork.py
+++ b/comfy_extras/nodes_hypernetwork.py
@@ -35,7 +35,7 @@ def load_hypernetwork_patch(path, strength):
     for d in sd:
         try:
             dim = int(d)
-        except:
+        except (ValueError, TypeError):
             continue
 
         output = []
diff --git a/comfy_extras/nodes_lora_extract.py b/comfy_extras/nodes_lora_extract.py
index 975f90f45..a5d14c261 100644
--- a/comfy_extras/nodes_lora_extract.py
+++ b/comfy_extras/nodes_lora_extract.py
@@ -75,8 +75,8 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora
                 out = extract_lora(weight_diff, rank)
                 output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu()
                 output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu()
-            except:
-                logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k))
+            except (RuntimeError, ValueError, TypeError, torch.linalg.LinAlgError) as e:
+                logging.warning("Could not generate lora weights for key {}, is the weight difference a zero? Error: {}".format(k, e))
         elif lora_type == LORAType.FULL_DIFF:
             output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu()