From 3d0306b89fefa69bb1eb8d92537f121121f3aa99 Mon Sep 17 00:00:00 2001
From: doctorpangloss <@hiddenswitch.com>
Date: Tue, 17 Jun 2025 11:36:41 -0700
Subject: [PATCH] more fixes from pylint

---
 .pylintrc                                 |  2 +-
 comfy/app/app_settings.py                 |  4 ++--
 comfy/k_diffusion/sampling.py             |  2 +-
 comfy/ldm/ace/vae/music_vocoder.py        | 10 +++++-----
 comfy/ldm/chroma/model.py                 |  4 ++--
 comfy/ldm/modules/attention.py            |  4 +++-
 comfy/weight_adapter/glora.py             |  2 ++
 comfy/weight_adapter/lora.py              |  2 ++
 comfy_extras/nodes/nodes_hunyuan.py       |  2 ++
 comfy_extras/nodes/nodes_torch_compile.py |  2 +-
 10 files changed, 21 insertions(+), 13 deletions(-)

diff --git a/.pylintrc b/.pylintrc
index c376f2078..a095aa977 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -68,7 +68,7 @@ ignored-modules=sentencepiece.*,comfy.api,comfy.cmd.folder_paths
 
 # Python code to execute, usually for sys.path manipulation such as
 # pygtk.require().
-#init-hook=
+init-hook='import sys; sys.path.insert(0, ".")'
 
 # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
 # number of processors available to use, and will cap the count on Windows to
diff --git a/comfy/app/app_settings.py b/comfy/app/app_settings.py
index cf2030f9c..ac68115ce 100644
--- a/comfy/app/app_settings.py
+++ b/comfy/app/app_settings.py
@@ -17,14 +17,14 @@ class AppSettings():
                 "comfy.settings.json"
             )
         except KeyError as e:
-            loggererror("User settings not found.")
+            logger.error("User settings not found.")
             raise web.HTTPUnauthorized() from e
         if os.path.isfile(file):
             try:
                 with open(file) as f:
                     return json.load(f)
             except:
-                loggererror(f"The user settings file is corrupted: {file}")
+                logger.error(f"The user settings file is corrupted: {file}")
                 return {}
         else:
             return {}
diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py
index c2128bd5b..783014bae 100644
--- a/comfy/k_diffusion/sampling.py
+++ b/comfy/k_diffusion/sampling.py
@@ -1405,7 +1405,7 @@ def sample_gradient_estimation(model, x, sigmas, extra_args=None, callback=None,
 
     if cfg_pp:
         model_options = extra_args.get("model_options", {}).copy()
-        extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
+        extra_args["model_options"] = model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
 
     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
diff --git a/comfy/ldm/ace/vae/music_vocoder.py b/comfy/ldm/ace/vae/music_vocoder.py
index 9ee9e9d1f..074194a8f 100755
--- a/comfy/ldm/ace/vae/music_vocoder.py
+++ b/comfy/ldm/ace/vae/music_vocoder.py
@@ -333,9 +333,9 @@ class ResBlock1(torch.nn.Module):
 
     def remove_weight_norm(self):
         for conv in self.convs1:
-            remove_weight_norm(conv)
+            remove_weight_norm(conv, None)
         for conv in self.convs2:
-            remove_weight_norm(conv)
+            remove_weight_norm(conv, None)
 
 
 class HiFiGANGenerator(nn.Module):
@@ -454,11 +454,11 @@ class HiFiGANGenerator(nn.Module):
 
     def remove_weight_norm(self):
         for up in self.ups:
-            remove_weight_norm(up)
+            remove_weight_norm(up, None)
         for block in self.resblocks:
             block.remove_weight_norm()
-        remove_weight_norm(self.conv_pre)
-        remove_weight_norm(self.conv_post)
+        remove_weight_norm(self.conv_pre, None)
+        remove_weight_norm(self.conv_post, None)
 
 
 class ADaMoSHiFiGANV1(nn.Module):
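The app_settings.py hunk above restores the dot in two `logger.error` calls that had collapsed into `loggererror`. Below is a minimal sketch of the pattern those calls rely on, assuming the module binds a standard module-level logger; `load_settings` and its `path` parameter are hypothetical stand-ins for the surrounding method:

    import json
    import logging

    # Assumption: comfy/app/app_settings.py defines a module-level logger
    # roughly like this, which is what makes `logger.error(...)` resolve.
    logger = logging.getLogger(__name__)

    def load_settings(path: str) -> dict:
        # Mirrors the patched error handling: log the problem and fall
        # back to an empty settings dict instead of propagating the error.
        try:
            with open(path, encoding="utf-8") as f:
                return json.load(f)
        except json.JSONDecodeError:
            logger.error("The user settings file is corrupted: %s", path)
            return {}
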
diff --git a/comfy/ldm/chroma/model.py b/comfy/ldm/chroma/model.py
index 485e544e8..06032d9dc 100644
--- a/comfy/ldm/chroma/model.py
+++ b/comfy/ldm/chroma/model.py
@@ -220,7 +220,7 @@ class Chroma(nn.Module):
             if i not in self.skip_dit:
                 single_mod = self.get_modulations(mod_vectors, "single", idx=i)
                 if ("single_block", i) in blocks_replace:
-                    def block_wrap(args):
+                    def block_wrap_1(args):
                         out = {}
                         out["img"] = block(args["img"],
                                            vec=args["vec"],
@@ -232,7 +232,7 @@ class Chroma(nn.Module):
                                            "vec": single_mod,
                                            "pe": pe,
                                            "attn_mask": attn_mask},
-                                          {"original_block": block_wrap})
+                                          {"original_block": block_wrap_1})
                     img = out["img"]
                 else:
                     img = block(img, vec=single_mod, pe=pe, attn_mask=attn_mask)
diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index aa9faf723..f6910368f 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -21,10 +21,12 @@ if model_management.sage_attention_enabled():
         from sageattention import sageattn  # pylint: disable=import-error
     except ModuleNotFoundError as e:
         if e.name == "sageattention":
+            import sys
             logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention")
         else:
             raise e
-    sageattn = torch.nn.functional.scaled_dot_product_attention
+else:
+    sageattn = torch.nn.functional.scaled_dot_product_attention
 
 
 if model_management.flash_attention_enabled():
diff --git a/comfy/weight_adapter/glora.py b/comfy/weight_adapter/glora.py
index 12d873d24..040336054 100644
--- a/comfy/weight_adapter/glora.py
+++ b/comfy/weight_adapter/glora.py
@@ -54,6 +54,8 @@ class GLoRAAdapter(WeightAdapterBase):
         dora_scale = v[5]
 
         old_glora = False
+        # will correctly throw a div-by-zero error if rank was not read
+        rank = 0
         if v[3].shape[1] == v[2].shape[0] == v[0].shape[0] == v[1].shape[1]:
             rank = v[0].shape[0]
             old_glora = True
diff --git a/comfy/weight_adapter/lora.py b/comfy/weight_adapter/lora.py
index e9fd921cc..17ae63640 100644
--- a/comfy/weight_adapter/lora.py
+++ b/comfy/weight_adapter/lora.py
@@ -34,6 +34,8 @@ class LoRAAdapter(WeightAdapterBase):
         mochi_lora = "{}.lora_B".format(x)
         transformers_lora = "{}.lora_linear_layer.up.weight".format(x)
         A_name = None
+        B_name = None
+        mid_name = None
 
         if regular_lora in lora.keys():
             A_name = regular_lora
diff --git a/comfy_extras/nodes/nodes_hunyuan.py b/comfy_extras/nodes/nodes_hunyuan.py
index c2bf9539c..161af471a 100644
--- a/comfy_extras/nodes/nodes_hunyuan.py
+++ b/comfy_extras/nodes/nodes_hunyuan.py
@@ -108,6 +108,8 @@ class HunyuanImageToVideo:
             out_latent["noise_mask"] = mask
         elif guidance_type == "custom":
             cond = {"ref_latent": concat_latent_image}
+        else:
+            cond = None
 
         positive = node_helpers.conditioning_set_values(positive, cond)
 
diff --git a/comfy_extras/nodes/nodes_torch_compile.py b/comfy_extras/nodes/nodes_torch_compile.py
index 508515782..9bac7854e 100644
--- a/comfy_extras/nodes/nodes_torch_compile.py
+++ b/comfy_extras/nodes/nodes_torch_compile.py
@@ -199,7 +199,7 @@ class QuantizeModel(CustomNode):
             if "autoquant" in strategy:
                 _in_place_fixme = autoquant(unet, error_on_unseen=False)
             else:
-                quantize_(unet, int8_dynamic_activation_int8_weight(), device=model_management.get_torch_device(), set_inductor_config=False)
+                quantize_(unet, int8_dynamic_activation_int8_weight(), device=model_management.get_torch_device())
                 _in_place_fixme = unet
             unwrap_tensor_subclass(_in_place_fixme)
         else:
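The attention.py hunk above is more than a lint cleanup: the deleted assignment sat at the end of the `if` block, so even a successful `from sageattention import sageattn` was immediately clobbered by the SDPA fallback, and moving the assignment under `else:` confines the fallback to the case where sage attention is disabled. A self-contained sketch of the corrected control flow, with the one difference that it also falls back after logging instead of leaving the name unbound; `resolve_attention` and its `use_sage` flag are hypothetical names, not the module's API:

    import logging
    import sys

    import torch

    def resolve_attention(use_sage: bool):
        if use_sage:
            try:
                from sageattention import sageattn  # pylint: disable=import-error
                return sageattn
            except ModuleNotFoundError as e:
                if e.name != "sageattention":
                    raise
                logging.error(
                    "To use `--use-sage-attention`, install the package first:"
                    "\n\t%s -m pip install sageattention", sys.executable)
        # Only reached when sage attention is disabled or unavailable.
        return torch.nn.functional.scaled_dot_product_attention
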
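The final hunk drops `set_inductor_config=False` from the torchao call, presumably because the installed torchao release no longer accepts that keyword. A hedged sketch of the resulting call shape; `model` and `device` stand in for the node's `unet` and `model_management.get_torch_device()`:

    import torch
    from torchao.quantization import quantize_, int8_dynamic_activation_int8_weight

    def quantize_unet_in_place(model: torch.nn.Module, device) -> torch.nn.Module:
        # quantize_ mutates the module in place, which is why the node keeps
        # the same reference around afterwards as _in_place_fixme.
        quantize_(model, int8_dynamic_activation_int8_weight(), device=device)
        return model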