more fixes from pylint

doctorpangloss 2025-06-17 11:36:41 -07:00
parent 929bfbb99f
commit 3d0306b89f
10 changed files with 21 additions and 13 deletions


@@ -68,7 +68,7 @@ ignored-modules=sentencepiece.*,comfy.api,comfy.cmd.folder_paths
 # Python code to execute, usually for sys.path manipulation such as
 # pygtk.require().
-#init-hook=
+init-hook='import sys; sys.path.insert(0, ".")'
 # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
 # number of processors available to use, and will cap the count on Windows to
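
The init-hook runs as Python code before linting starts; pushing the repository root onto sys.path lets pylint resolve first-party packages such as comfy when it is invoked from the checkout root. A minimal standalone sketch of the same effect (the package name stands in for whatever sits at the repo root):

    import importlib.util
    import sys

    sys.path.insert(0, ".")  # the same statement the init-hook executes
    # Any package directory under the current directory is now importable:
    print(importlib.util.find_spec("comfy") is not None)  # True from the repo root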


@@ -17,14 +17,14 @@ class AppSettings():
                 "comfy.settings.json"
             )
         except KeyError as e:
-            loggererror("User settings not found.")
+            logger.error("User settings not found.")
             raise web.HTTPUnauthorized() from e
         if os.path.isfile(file):
             try:
                 with open(file) as f:
                     return json.load(f)
             except:
-                loggererror(f"The user settings file is corrupted: {file}")
+                logger.error(f"The user settings file is corrupted: {file}")
                 return {}
         else:
             return {}
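
Both changed lines fix the same defect: loggererror is an undefined name (a dropped "." between logger and error), which pylint reports as undefined-variable (E0602). A minimal sketch of the corrected pattern, assuming the module defines the usual module-level logger; the bare except is narrowed here, although the commit leaves it as-is:

    import json
    import logging
    import os

    logger = logging.getLogger(__name__)  # the name the fixed calls rely on

    def load_settings(path: str) -> dict:
        if os.path.isfile(path):
            try:
                with open(path) as f:
                    return json.load(f)
            except (OSError, json.JSONDecodeError):
                logger.error("The user settings file is corrupted: %s", path)
        return {}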


@@ -1405,7 +1405,7 @@ def sample_gradient_estimation(model, x, sigmas, extra_args=None, callback=None,
     if cfg_pp:
         model_options = extra_args.get("model_options", {}).copy()
-        extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
+        extra_args["model_options"] = model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
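
The comfy. prefix presumably referenced a name that is never bound in this module (it imports model_patcher directly), so pylint flags it as undefined-variable. A stdlib illustration of the binding rule involved:

    from os import path  # binds only the name "path", not "os"

    print(path.join("a", "b"))  # resolves through the bound name
    # print(os.path.join("a", "b"))  # NameError: "os" itself was never imported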


@@ -333,9 +333,9 @@ class ResBlock1(torch.nn.Module):
     def remove_weight_norm(self):
         for conv in self.convs1:
-            remove_weight_norm(conv)
+            remove_weight_norm(conv, None)
         for conv in self.convs2:
-            remove_weight_norm(conv)
+            remove_weight_norm(conv, None)


 class HiFiGANGenerator(nn.Module):
@@ -454,11 +454,11 @@ class HiFiGANGenerator(nn.Module):
     def remove_weight_norm(self):
         for up in self.ups:
-            remove_weight_norm(up)
+            remove_weight_norm(up, None)
         for block in self.resblocks:
             block.remove_weight_norm()
-        remove_weight_norm(self.conv_pre)
-        remove_weight_norm(self.conv_post)
+        remove_weight_norm(self.conv_pre, None)
+        remove_weight_norm(self.conv_post, None)


 class ADaMoSHiFiGANV1(nn.Module):
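
The explicit second argument presumably satisfies whichever remove_weight_norm helper this module imports; for reference, the stock torch.nn.utils.remove_weight_norm takes the parameter name as its second argument and defaults it to "weight". A standalone sketch with the stock torch API:

    import torch.nn as nn
    from torch.nn.utils import remove_weight_norm, weight_norm

    conv = weight_norm(nn.Conv1d(4, 4, 3))  # reparameterizes into weight_g / weight_v
    print(hasattr(conv, "weight_g"))        # True
    remove_weight_norm(conv)                # name defaults to "weight"
    print(hasattr(conv, "weight_g"))        # False: folded back into .weight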


@@ -220,7 +220,7 @@ class Chroma(nn.Module):
             if i not in self.skip_dit:
                 single_mod = self.get_modulations(mod_vectors, "single", idx=i)
                 if ("single_block", i) in blocks_replace:
-                    def block_wrap(args):
+                    def block_wrap_1(args):
                         out = {}
                         out["img"] = block(args["img"],
                                            vec=args["vec"],
@@ -232,7 +232,7 @@ class Chroma(nn.Module):
                                           "vec": single_mod,
                                           "pe": pe,
                                           "attn_mask": attn_mask},
-                                         {"original_block": block_wrap})
+                                         {"original_block": block_wrap_1})
                     img = out["img"]
                 else:
                     img = block(img, vec=single_mod, pe=pe, attn_mask=attn_mask)
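
The rename suggests an earlier loop in the same method already defines a block_wrap closure, which pylint reports as function-redefined (E0102); giving the second closure a distinct name clears it. A minimal illustration of the rule:

    def forward(xs):
        def wrap(x):      # first nested definition
            return x + 1

        def wrap_1(x):    # renamed; redefining "wrap" here would trip E0102
            return x * 2

        return [wrap_1(wrap(x)) for x in xs]

    print(forward([1, 2, 3]))  # [4, 6, 8]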


@@ -21,10 +21,12 @@ if model_management.sage_attention_enabled():
         from sageattention import sageattn # pylint: disable=import-error
     except ModuleNotFoundError as e:
         if e.name == "sageattention":
+            import sys
             logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention")
         else:
             raise e
+        sageattn = torch.nn.functional.scaled_dot_product_attention
 else:
     sageattn = torch.nn.functional.scaled_dot_product_attention

 if model_management.flash_attention_enabled():
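
The two additions close pylint gaps: sys is now imported before the f-string uses sys.executable, and sageattn is bound even when the import fails, so later code cannot hit an undefined name. The guarded-import-with-fallback pattern in isolation (a sketch; it assumes torch is installed):

    import sys

    import torch

    try:
        from sageattention import sageattn  # optional fast kernel
    except ModuleNotFoundError as e:
        if e.name != "sageattention":
            raise  # something else failed inside sageattention: propagate
        print(f"install: {sys.executable} -m pip install sageattention", file=sys.stderr)
        sageattn = torch.nn.functional.scaled_dot_product_attention  # keeps the name bound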


@@ -54,6 +54,8 @@ class GLoRAAdapter(WeightAdapterBase):
         dora_scale = v[5]

         old_glora = False
+        # will correctly throw div by zero error if rank was not read
+        rank = 0
         if v[3].shape[1] == v[2].shape[0] == v[0].shape[0] == v[1].shape[1]:
             rank = v[0].shape[0]
             old_glora = True
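
Seeding rank = 0 before the branch chain resolves pylint's possibly-used-before-assignment (E0606) without hiding bugs: as the comment says, a rank no branch assigned fails later as a ZeroDivisionError instead of a NameError. The pattern in miniature:

    def inverse_rank(rank_hint=None):
        rank = 0                 # defined on every path; 0 keeps failure loud
        if rank_hint is not None:
            rank = rank_hint
        return 1.0 / rank        # ZeroDivisionError if nothing set a real rank

    print(inverse_rank(4))       # 0.25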


@@ -34,6 +34,8 @@ class LoRAAdapter(WeightAdapterBase):
         mochi_lora = "{}.lora_B".format(x)
         transformers_lora = "{}.lora_linear_layer.up.weight".format(x)
         A_name = None
+        B_name = None
+        mid_name = None

         if regular_lora in lora.keys():
             A_name = regular_lora


@@ -108,6 +108,8 @@ class HunyuanImageToVideo:
             out_latent["noise_mask"] = mask
         elif guidance_type == "custom":
             cond = {"ref_latent": concat_latent_image}
+        else:
+            cond = None

         positive = node_helpers.conditioning_set_values(positive, cond)


@@ -199,7 +199,7 @@ class QuantizeModel(CustomNode):
             if "autoquant" in strategy:
                 _in_place_fixme = autoquant(unet, error_on_unseen=False)
             else:
-                quantize_(unet, int8_dynamic_activation_int8_weight(), device=model_management.get_torch_device(), set_inductor_config=False)
+                quantize_(unet, int8_dynamic_activation_int8_weight(), device=model_management.get_torch_device())
                 _in_place_fixme = unet
             unwrap_tensor_subclass(_in_place_fixme)
         else:
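
The dropped set_inductor_config=False keyword presumably no longer exists in the torchao quantize_ signature this code targets, which pylint reports as unexpected-keyword-arg (E1123). A generic illustration of that error class (the function below is hypothetical):

    def quantize(model, config, device=None):  # hypothetical narrowed signature
        return model, config, device

    quantize("unet", "int8-dynamic", device="cuda")  # fine
    # quantize("unet", "int8-dynamic", set_inductor_config=False)
    #   -> TypeError at runtime; pylint flags it statically as E1123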