diff --git a/app/app_settings.py b/app/app_settings.py
index c7ac73bf6..6fd2fbaff 100644
--- a/app/app_settings.py
+++ b/app/app_settings.py
@@ -21,7 +21,7 @@ class AppSettings():
             try:
                 with open(file) as f:
                     return json.load(f)
-            except:
+            except json.JSONDecodeError:
                 logging.error(f"The user settings file is corrupted: {file}")
                 return {}
         else:
diff --git a/app/model_manager.py b/app/model_manager.py
index f124d1117..113ef32aa 100644
--- a/app/model_manager.py
+++ b/app/model_manager.py
@@ -73,7 +73,8 @@ class ModelFileManager:
                     img.save(img_bytes, format="WEBP")
                     img_bytes.seek(0)
                     return web.Response(body=img_bytes.getvalue(), content_type="image/webp")
-            except:
+            except (IOError, OSError, ValueError, Image.DecompressionBombError) as e:
+                logging.warning(f"Failed to load preview image {default_preview}: {e}")
                 return web.Response(status=404)
 
     def get_model_file_list(self, folder_name: str):
diff --git a/comfy/ops.py b/comfy/ops.py
index b5cd1d47e..f0776b709 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -70,7 +70,7 @@ try: #TODO: change upper bound version once it's fixed'
         NVIDIA_MEMORY_CONV_BUG_WORKAROUND = True
         logging.info("working around nvidia conv3d memory bug.")
-except:
+except (RuntimeError, AttributeError, TypeError):
     pass
 
 
 cast_to = comfy.model_management.cast_to #TODO: remove once no more references
diff --git a/comfy/sd.py b/comfy/sd.py
index e1a2840d2..2de954e1f 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -1118,19 +1118,19 @@ class VAE:
     def spacial_compression_decode(self):
         try:
             return self.upscale_ratio[-1]
-        except:
+        except (IndexError, TypeError):
             return self.upscale_ratio
 
     def spacial_compression_encode(self):
         try:
             return self.downscale_ratio[-1]
-        except:
+        except (IndexError, TypeError):
             return self.downscale_ratio
 
     def temporal_compression_decode(self):
         try:
             return round(self.upscale_ratio[0](8192) / 8192)
-        except:
+        except (IndexError, TypeError, AttributeError):
             return None
 
 
diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py
index 897186bba..395fdb751 100644
--- a/comfy/sd1_clip.py
+++ b/comfy/sd1_clip.py
@@ -358,7 +358,7 @@ def token_weights(string, current_weight):
                 try:
                     weight = float(x[xx+1:])
                     x = x[:xx]
-                except:
+                except ValueError:
                     pass
             out += token_weights(x, weight)
         else:
@@ -425,7 +425,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
         try:
             if os.path.commonpath((embed_dir, embed_path)) != embed_dir:
                 continue
-        except:
+        except (ValueError, TypeError):
             continue
         if not os.path.isfile(embed_path):
             extensions = ['.safetensors', '.pt', '.bin']
diff --git a/comfy/weight_adapter/lora.py b/comfy/weight_adapter/lora.py
index 8e1261a12..678bfbebf 100644
--- a/comfy/weight_adapter/lora.py
+++ b/comfy/weight_adapter/lora.py
@@ -205,7 +205,7 @@ class LoRAAdapter(WeightAdapterBase):
             try:
                 reshape = lora[reshape_name].tolist()
                 loaded_keys.add(reshape_name)
-            except:
+            except (AttributeError, TypeError):
                 pass
             weights = (lora[A_name], lora[B_name], alpha, mid, dora_scale, reshape)
             loaded_keys.add(A_name)
diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py
index 248efdef3..1b3cc363f 100644
--- a/comfy_extras/nodes_freelunch.py
+++ b/comfy_extras/nodes_freelunch.py
@@ -55,8 +55,8 @@ class FreeU(IO.ComfyNode):
             if hsp.device not in on_cpu_devices:
                 try:
                     hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
-                except:
-                    logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
+                except (RuntimeError, TypeError) as e:
+                    logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU. Error: {}".format(hsp.device, e))
                     on_cpu_devices[hsp.device] = True
                     hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
             else:
@@ -109,8 +109,8 @@ class FreeU_V2(IO.ComfyNode):
             if hsp.device not in on_cpu_devices:
                 try:
                     hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
-                except:
-                    logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
+                except (RuntimeError, TypeError) as e:
+                    logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU. Error: {}".format(hsp.device, e))
                     on_cpu_devices[hsp.device] = True
                     hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
             else:
diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py
index 2a6a87a81..8b247c470 100644
--- a/comfy_extras/nodes_hypernetwork.py
+++ b/comfy_extras/nodes_hypernetwork.py
@@ -35,7 +35,7 @@ def load_hypernetwork_patch(path, strength):
     for d in sd:
         try:
             dim = int(d)
-        except:
+        except (ValueError, TypeError):
             continue
 
         output = []
diff --git a/comfy_extras/nodes_lora_extract.py b/comfy_extras/nodes_lora_extract.py
index 975f90f45..a5d14c261 100644
--- a/comfy_extras/nodes_lora_extract.py
+++ b/comfy_extras/nodes_lora_extract.py
@@ -75,8 +75,8 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora
                     out = extract_lora(weight_diff, rank)
                     output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu()
                     output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu()
-                except:
-                    logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k))
+                except (RuntimeError, ValueError, TypeError, torch.linalg.LinAlgError) as e:
+                    logging.warning("Could not generate lora weights for key {}, is the weight difference a zero? Error: {}".format(k, e))
 
             elif lora_type == LORAType.FULL_DIFF:
                 output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu()
diff --git a/server.py b/server.py
index 27b14825e..71d9e93a2 100644
--- a/server.py
+++ b/server.py
@@ -555,7 +555,7 @@ class PromptServer():
                     buffer.seek(0)
 
                     return web.Response(body=buffer.read(), content_type=f'image/{image_format}',
-                                        headers={"Content-Disposition": f"filename=\"{filename}\""})
+                                        headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})
 
                 if 'channel' not in request.rel_url.query:
                     channel = 'rgba'
@@ -575,7 +575,7 @@
                     buffer.seek(0)
 
                     return web.Response(body=buffer.read(), content_type='image/png',
-                                        headers={"Content-Disposition": f"filename=\"{filename}\""})
+                                        headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})
 
                 elif channel == 'a':
                     with Image.open(file) as img:
@@ -592,7 +592,7 @@
                     alpha_buffer.seek(0)
 
                     return web.Response(body=alpha_buffer.read(), content_type='image/png',
-                                        headers={"Content-Disposition": f"filename=\"{filename}\""})
+                                        headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})
                 else:
                     # Use the content type from asset resolution if available,
                     # otherwise guess from the filename.
@@ -609,7 +609,7 @@
                 return web.FileResponse(
                     file,
                     headers={
-                        "Content-Disposition": f"filename=\"{filename}\"",
+                        "Content-Disposition": f"attachment; filename=\"{filename}\"",
                         "Content-Type": content_type
                     }
                 )