commit bc0d2407ab
Author: LincolnBurrows2017
Date: 2026-03-27 23:48:25 -07:00 (committed by GitHub)
10 changed files with 21 additions and 20 deletions


@@ -21,7 +21,7 @@ class AppSettings():
             try:
                 with open(file) as f:
                     return json.load(f)
-            except:
+            except json.JSONDecodeError:
                 logging.error(f"The user settings file is corrupted: {file}")
                 return {}
         else:
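Narrowing the bare except to json.JSONDecodeError keeps the "corrupted settings file" message for malformed JSON while letting unrelated failures (for example permission errors) propagate instead of being silently swallowed. A minimal standalone sketch of the pattern, using a hypothetical load_settings helper rather than the actual AppSettings method:

    import json
    import logging

    def load_settings(path):
        # Only malformed JSON counts as a corrupted settings file;
        # OSError and other problems propagate to the caller.
        try:
            with open(path) as f:
                return json.load(f)
        except json.JSONDecodeError:
            logging.error(f"The user settings file is corrupted: {path}")
            return {}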


@@ -73,7 +73,8 @@ class ModelFileManager:
                    img.save(img_bytes, format="WEBP")
                    img_bytes.seek(0)
                    return web.Response(body=img_bytes.getvalue(), content_type="image/webp")
-            except:
+            except (IOError, OSError, ValueError, Image.DecompressionBombError) as e:
+                logging.warning(f"Failed to load preview image {default_preview}: {e}")
                return web.Response(status=404)

    def get_model_file_list(self, folder_name: str):
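The preview handler now names the errors Pillow can raise while opening or re-encoding a preview, including Image.DecompressionBombError for oversized images, and logs the failure before returning 404. A rough self-contained sketch of the same idea (load_preview is a hypothetical helper, not the handler's actual structure):

    import logging
    from io import BytesIO
    from PIL import Image

    def load_preview(path):
        # Convert the preview to WEBP bytes; None signals "no preview available".
        try:
            with Image.open(path) as img:
                buf = BytesIO()
                img.save(buf, format="WEBP")
                return buf.getvalue()
        except (OSError, ValueError, Image.DecompressionBombError) as e:
            logging.warning(f"Failed to load preview image {path}: {e}")
            return None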


@@ -70,7 +70,7 @@ try:
        #TODO: change upper bound version once it's fixed'
        NVIDIA_MEMORY_CONV_BUG_WORKAROUND = True
        logging.info("working around nvidia conv3d memory bug.")
-except:
+except (RuntimeError, AttributeError, TypeError):
     pass

 cast_to = comfy.model_management.cast_to #TODO: remove once no more references


@@ -1118,19 +1118,19 @@ class VAE:
     def spacial_compression_decode(self):
         try:
             return self.upscale_ratio[-1]
-        except:
+        except (IndexError, TypeError):
             return self.upscale_ratio

     def spacial_compression_encode(self):
         try:
             return self.downscale_ratio[-1]
-        except:
+        except (IndexError, TypeError):
             return self.downscale_ratio

     def temporal_compression_decode(self):
         try:
             return round(self.upscale_ratio[0](8192) / 8192)
-        except:
+        except (IndexError, TypeError, AttributeError):
             return None
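In these helpers the scale factors appear to be either a plain number or a per-axis sequence (the temporal helper even calls the first element): subscripting a plain number raises TypeError, an empty sequence raises IndexError, and a non-callable first element fails in the temporal case, which is what the new except clauses cover. A self-contained sketch of the same fallback pattern, with an illustrative class rather than the real VAE fields:

    class Scaler:
        def __init__(self, ratio):
            # ratio may be a single int or a sequence of per-axis factors.
            self.ratio = ratio

        def spatial_ratio(self):
            try:
                return self.ratio[-1]        # sequence case
            except (IndexError, TypeError):
                return self.ratio            # scalar (or empty) case

    print(Scaler(8).spatial_ratio())           # 8
    print(Scaler((4, 8, 8)).spatial_ratio())   # 8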


@@ -358,7 +358,7 @@ def token_weights(string, current_weight):
                 try:
                     weight = float(x[xx+1:])
                     x = x[:xx]
-                except:
+                except ValueError:
                     pass
             out += token_weights(x, weight)
         else:
@@ -425,7 +425,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=None
         try:
             if os.path.commonpath((embed_dir, embed_path)) != embed_dir:
                 continue
-        except:
+        except (ValueError, TypeError):
             continue
         if not os.path.isfile(embed_path):
             extensions = ['.safetensors', '.pt', '.bin']


@@ -205,7 +205,7 @@ class LoRAAdapter(WeightAdapterBase):
            try:
                reshape = lora[reshape_name].tolist()
                loaded_keys.add(reshape_name)
-           except:
+           except (AttributeError, TypeError):
                pass
            weights = (lora[A_name], lora[B_name], alpha, mid, dora_scale, reshape)
            loaded_keys.add(A_name)


@@ -55,8 +55,8 @@ class FreeU(IO.ComfyNode):
                 if hsp.device not in on_cpu_devices:
                     try:
                         hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
-                    except:
-                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
+                    except (RuntimeError, TypeError) as e:
+                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU. Error: {}".format(hsp.device, e))
                         on_cpu_devices[hsp.device] = True
                         hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
                 else:
@@ -109,8 +109,8 @@ class FreeU_V2(IO.ComfyNode):
                 if hsp.device not in on_cpu_devices:
                     try:
                         hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
-                    except:
-                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
+                    except (RuntimeError, TypeError) as e:
+                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU. Error: {}".format(hsp.device, e))
                         on_cpu_devices[hsp.device] = True
                         hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
                 else:
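Both FreeU nodes keep the existing fall-back: if the FFT-based filter raises on the current device (some backends lack torch.fft kernels), the device is cached in on_cpu_devices, the filter is rerun on the CPU, and the result is moved back; the only change is catching named exceptions and including the error text in the warning. A minimal sketch of that fallback, with a stand-in filter function rather than the real Fourier_filter:

    import torch

    _on_cpu = {}

    def fft_filter(x):
        # Stand-in for Fourier_filter: round-trip through the frequency domain.
        return torch.fft.ifftn(torch.fft.fftn(x)).real

    def filtered(x):
        if x.device not in _on_cpu:
            try:
                return fft_filter(x)
            except RuntimeError:
                _on_cpu[x.device] = True   # remember devices without FFT support
        # CPU fallback, result moved back to the original device
        return fft_filter(x.cpu()).to(x.device)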


@@ -35,7 +35,7 @@ def load_hypernetwork_patch(path, strength):
     for d in sd:
         try:
             dim = int(d)
-        except:
+        except (ValueError, TypeError):
             continue

         output = []


@@ -75,8 +75,8 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora
                 out = extract_lora(weight_diff, rank)
                 output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu()
                 output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu()
-            except:
-                logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k))
+            except (RuntimeError, ValueError, TypeError, torch.linalg.LinAlgError) as e:
+                logging.warning("Could not generate lora weights for key {}, is the weight difference a zero? Error: {}".format(k, e))
         elif lora_type == LORAType.FULL_DIFF:
             output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu()


@@ -555,7 +555,7 @@ class PromptServer():
                        buffer.seek(0)
                        return web.Response(body=buffer.read(), content_type=f'image/{image_format}',
-                                           headers={"Content-Disposition": f"filename=\"{filename}\""})
+                                           headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})

                if 'channel' not in request.rel_url.query:
                    channel = 'rgba'
@@ -575,7 +575,7 @@ class PromptServer():
                        buffer.seek(0)
                        return web.Response(body=buffer.read(), content_type='image/png',
-                                           headers={"Content-Disposition": f"filename=\"{filename}\""})
+                                           headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})

                elif channel == 'a':
                    with Image.open(file) as img:
@@ -592,7 +592,7 @@ class PromptServer():
                        alpha_buffer.seek(0)
                        return web.Response(body=alpha_buffer.read(), content_type='image/png',
-                                           headers={"Content-Disposition": f"filename=\"{filename}\""})
+                                           headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})

                else:
                    # Use the content type from asset resolution if available,
                    # otherwise guess from the filename.
@@ -609,7 +609,7 @@ class PromptServer():
                    return web.FileResponse(
                        file,
                        headers={
-                           "Content-Disposition": f"filename=\"{filename}\"",
+                           "Content-Disposition": f"attachment; filename=\"{filename}\"",
                            "Content-Type": content_type
                        }
                    )
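The Content-Disposition change is the same in all four responses: adding "attachment;" tells the browser to download the file under the given name rather than render it inline, which also reduces the chance of user-supplied files being interpreted directly by the browser. A minimal aiohttp sketch of the difference (the route and payload here are illustrative, not the actual /view handler):

    from aiohttp import web

    async def download(request):
        data = b"example payload"
        filename = "result.png"
        return web.Response(
            body=data,
            content_type="image/png",
            # "attachment" forces a download prompt; omitting it (or using
            # "inline") lets the browser try to display the response instead.
            headers={"Content-Disposition": f'attachment; filename="{filename}"'},
        )

    app = web.Application()
    app.add_routes([web.get("/view", download)])
    # web.run_app(app) would serve it locally.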