Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-01-11 06:40:48 +08:00)
more fixes from pylint
This commit is contained in:
parent 929bfbb99f
commit 3d0306b89f
@@ -68,7 +68,7 @@ ignored-modules=sentencepiece.*,comfy.api,comfy.cmd.folder_paths

 # Python code to execute, usually for sys.path manipulation such as
 # pygtk.require().
-#init-hook=
+init-hook='import sys; sys.path.insert(0, ".")'

 # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
 # number of processors available to use, and will cap the count on Windows to
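The init-hook change tells pylint to put the repository root on sys.path before it starts resolving imports, so first-party packages such as comfy can be analyzed when pylint is invoked from the repo root. A minimal sketch of the same effect, run manually (illustrative only, not part of the commit):

import sys

sys.path.insert(0, ".")  # repo root first on the module search path, as the init-hook does

import importlib.util
# With the path patched, a first-party package resolves for static analysis and imports alike.
print(importlib.util.find_spec("comfy") is not None)  # True when run from the repository root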
@@ -17,14 +17,14 @@ class AppSettings():
                 "comfy.settings.json"
             )
         except KeyError as e:
-            loggererror("User settings not found.")
+            logger.error("User settings not found.")
             raise web.HTTPUnauthorized() from e
         if os.path.isfile(file):
             try:
                 with open(file) as f:
                     return json.load(f)
             except:
-                loggererror(f"The user settings file is corrupted: {file}")
+                logger.error(f"The user settings file is corrupted: {file}")
                 return {}
         else:
             return {}
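The two loggererror calls would have raised NameError at runtime; pylint's undefined-variable check flags exactly this, and the fix assumes a module-level logger is already bound. A sketch of the conventional pattern, with a hypothetical helper mirroring the error handling above:

import json
import logging
import os

logger = logging.getLogger(__name__)  # module-level logger the fixed lines rely on

def load_settings(path: str) -> dict:
    # Hypothetical helper, not the commit's code: same shape as the handler above.
    if not os.path.isfile(path):
        return {}
    try:
        with open(path) as f:
            return json.load(f)
    except json.JSONDecodeError:
        logger.error(f"The user settings file is corrupted: {path}")
        return {}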
@@ -1405,7 +1405,7 @@ def sample_gradient_estimation(model, x, sigmas, extra_args=None, callback=None,

     if cfg_pp:
         model_options = extra_args.get("model_options", {}).copy()
-        extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
+        extra_args["model_options"] = model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
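The sampler line swaps the fully qualified comfy.model_patcher reference for the bare model_patcher name, which suggests the file imports the submodule directly and pylint could not resolve the longer attribute path. A hedged sketch of the two import styles (the function name comes from the diff; the rest is illustrative):

# Old style: attribute access through the package requires importing the package itself.
import comfy.model_patcher
fn = comfy.model_patcher.set_model_options_post_cfg_function

# New style: bind the submodule directly and call through the local name.
from comfy import model_patcher
fn = model_patcher.set_model_options_post_cfg_function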
@@ -333,9 +333,9 @@ class ResBlock1(torch.nn.Module):

     def remove_weight_norm(self):
         for conv in self.convs1:
-            remove_weight_norm(conv)
+            remove_weight_norm(conv, None)
         for conv in self.convs2:
-            remove_weight_norm(conv)
+            remove_weight_norm(conv, None)


 class HiFiGANGenerator(nn.Module):
@@ -454,11 +454,11 @@ class HiFiGANGenerator(nn.Module):

     def remove_weight_norm(self):
         for up in self.ups:
-            remove_weight_norm(up)
+            remove_weight_norm(up, None)
         for block in self.resblocks:
             block.remove_weight_norm()
-        remove_weight_norm(self.conv_pre)
-        remove_weight_norm(self.conv_post)
+        remove_weight_norm(self.conv_pre, None)
+        remove_weight_norm(self.conv_post, None)


 class ADaMoSHiFiGANV1(nn.Module):
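Both remove_weight_norm methods call a module-level helper that shares their own name, and the added None argument appears to be a way to satisfy the linter once those names collide. An alternative that sidesteps the ambiguity is to alias the helper at import time; a sketch assuming torch's weight-norm utilities, not this repository's exact code:

import torch
import torch.nn as nn
from torch.nn.utils import remove_weight_norm as _remove_weight_norm  # alias avoids the name clash

class TinyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.utils.weight_norm(nn.Conv1d(4, 4, 3, padding=1))

    def forward(self, x):
        return self.conv(x)

    def remove_weight_norm(self):
        # unambiguous to both readers and linters
        _remove_weight_norm(self.conv)

block = TinyBlock()
block.remove_weight_norm()
print(block(torch.randn(1, 4, 8)).shape)  # torch.Size([1, 4, 8])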
@@ -220,7 +220,7 @@ class Chroma(nn.Module):
             if i not in self.skip_dit:
                 single_mod = self.get_modulations(mod_vectors, "single", idx=i)
                 if ("single_block", i) in blocks_replace:
-                    def block_wrap(args):
+                    def block_wrap_1(args):
                         out = {}
                         out["img"] = block(args["img"],
                                            vec=args["vec"],
@@ -232,7 +232,7 @@ class Chroma(nn.Module):
                                                                "vec": single_mod,
                                                                "pe": pe,
                                                                "attn_mask": attn_mask},
-                                                              {"original_block": block_wrap})
+                                                              {"original_block": block_wrap_1})
                     img = out["img"]
                 else:
                     img = block(img, vec=single_mod, pe=pe, attn_mask=attn_mask)
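Renaming the nested helper to block_wrap_1 presumably clears pylint's function-redefined report, since the surrounding method already defines a block_wrap for the double blocks in the same scope. A toy illustration of the pattern (all names hypothetical):

def run_blocks(first_blocks, second_blocks, x):
    for block in first_blocks:
        def wrap(args):          # first nested helper
            return block(args)
        x = wrap(x)

    for block in second_blocks:
        def wrap_1(args):        # a second "def wrap" in the same scope would be flagged as redefined
            return block(args)
        x = wrap_1(x)
    return x

print(run_blocks([lambda v: v + 1], [lambda v: v * 2], 3))  # -> 8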
@@ -21,10 +21,12 @@ if model_management.sage_attention_enabled():
         from sageattention import sageattn # pylint: disable=import-error
     except ModuleNotFoundError as e:
         if e.name == "sageattention":
+            import sys
             logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention")
         else:
             raise e
+        sageattn = torch.nn.functional.scaled_dot_product_attention
 else:
     sageattn = torch.nn.functional.scaled_dot_product_attention

 if model_management.flash_attention_enabled():
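The two added lines bind sys before the error message interpolates sys.executable and give sageattn a value on the failure path, so the name is defined no matter how the import goes; both are classic possibly-used-before-assignment findings. A standalone sketch of the fallback pattern, assuming torch's scaled_dot_product_attention as the drop-in:

import logging
import torch

try:
    from sageattention import sageattn  # optional accelerated kernel
except ModuleNotFoundError as e:
    if e.name == "sageattention":
        import sys
        logging.error(f"sageattention is not installed; falling back to PyTorch SDPA.\n\t{sys.executable} -m pip install sageattention")
    else:
        raise
    # bind the name on the failure path so callers never hit an unbound variable
    sageattn = torch.nn.functional.scaled_dot_product_attention

q = k = v = torch.randn(1, 2, 8, 16)
print(sageattn(q, k, v).shape)  # torch.Size([1, 2, 8, 16])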
@@ -54,6 +54,8 @@ class GLoRAAdapter(WeightAdapterBase):
         dora_scale = v[5]

         old_glora = False
+        # will correctly throw div by zero error if rank was not read
+        rank = 0
         if v[3].shape[1] == v[2].shape[0] == v[0].shape[0] == v[1].shape[1]:
             rank = v[0].shape[0]
             old_glora = True
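Initialising rank = 0 before the shape checks gives the variable a value on every path, which clears possibly-used-before-assignment; as the new comment says, a later division by rank then fails with a clear ZeroDivisionError instead of a NameError when neither layout matches. A reduced sketch of the idea (hypothetical names):

def infer_rank_scale(shapes):
    # Hypothetical reduction of the GLoRA rank detection above.
    rank = 0  # defined on every path; dividing by it fails loudly if no branch set it
    if shapes.get("old_layout"):
        rank = shapes["a"][0]
    elif shapes.get("new_layout"):
        rank = shapes["b"][1]
    return 1.0 / rank  # ZeroDivisionError, not NameError, when nothing matched

print(infer_rank_scale({"old_layout": True, "a": (8, 16)}))  # -> 0.125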
@@ -34,6 +34,8 @@ class LoRAAdapter(WeightAdapterBase):
         mochi_lora = "{}.lora_B".format(x)
         transformers_lora = "{}.lora_linear_layer.up.weight".format(x)
         A_name = None
+        B_name = None
+        mid_name = None

         if regular_lora in lora.keys():
             A_name = regular_lora
@@ -108,6 +108,8 @@ class HunyuanImageToVideo:
             out_latent["noise_mask"] = mask
         elif guidance_type == "custom":
            cond = {"ref_latent": concat_latent_image}
+        else:
+            cond = None

         positive = node_helpers.conditioning_set_values(positive, cond)

@@ -199,7 +199,7 @@ class QuantizeModel(CustomNode):
             if "autoquant" in strategy:
                 _in_place_fixme = autoquant(unet, error_on_unseen=False)
             else:
-                quantize_(unet, int8_dynamic_activation_int8_weight(), device=model_management.get_torch_device(), set_inductor_config=False)
+                quantize_(unet, int8_dynamic_activation_int8_weight(), device=model_management.get_torch_device())
                 _in_place_fixme = unet
             unwrap_tensor_subclass(_in_place_fixme)
         else:
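Dropping set_inductor_config=False suggests the installed torchao's quantize_ no longer accepts that keyword, which pylint reports as an unexpected keyword argument. A hedged sketch of the int8 dynamic-activation / int8-weight path, assuming the quantize_ and int8_dynamic_activation_int8_weight entry points of recent torchao releases; the toy model is illustrative:

import torch
import torch.nn as nn
from torchao.quantization import quantize_, int8_dynamic_activation_int8_weight

model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 8))

# In-place: linear weights become int8, activations are quantized dynamically at runtime.
quantize_(model, int8_dynamic_activation_int8_weight())

with torch.no_grad():
    print(model(torch.randn(2, 64)).shape)  # torch.Size([2, 8])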