Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2026-01-11 23:00:51 +08:00
Fix pylint errors

This commit is contained in:
parent dc94081155
commit f54af2c7ff
@@ -151,7 +151,7 @@ class FrontendManager:
     def get_installed_templates_version(cls) -> str:
         """Get the currently installed workflow templates package version."""
         try:
-            templates_version_str = version("comfyui-workflow-templates")
+            templates_version_str = importlib.metadata.version("comfyui-workflow-templates")
             return templates_version_str
         except Exception:
             return None
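Editor's note on this hunk: fully qualifying the call as importlib.metadata.version() resolves pylint's undefined-name complaint about the bare version() and makes the helper's origin explicit. The declared return type stays str even though the except branch returns None; Optional[str] would describe it more accurately. A minimal standalone sketch of the same lookup pattern (the package name is reused from the hunk, everything else is illustrative):

    import importlib.metadata
    from typing import Optional

    def get_package_version(package_name: str) -> Optional[str]:
        """Return the installed version of a distribution, or None if absent."""
        try:
            # Raises importlib.metadata.PackageNotFoundError when not installed.
            return importlib.metadata.version(package_name)
        except importlib.metadata.PackageNotFoundError:
            return None

    # Prints a version string like "0.1.0", or None when the package is absent.
    print(get_package_version("comfyui-workflow-templates"))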
@@ -55,7 +55,7 @@ async def _prompt_worker(q: AbstractPromptQueue, server_instance: server_module.
     if args.cache_lru > 0:
         cache_type = execution.CacheType.LRU
     elif args.cache_none:
-        cache_type = execution.CacheType.DEPENDENCY_AWARE
+        cache_type = execution.CacheType.NONE
 
     e = execution.PromptExecutor(server_instance, cache_type=cache_type, cache_size=args.cache_lru)
     last_gc_collect = 0
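This hunk is a behavior fix rather than a rename: the args.cache_none branch previously assigned CacheType.DEPENDENCY_AWARE, so asking for no cache actually enabled a different cache. A hedged sketch of the corrected selection logic; the CLASSIC default is an assumption, not taken from this diff:

    from enum import Enum, auto

    class CacheType(Enum):
        CLASSIC = auto()
        LRU = auto()
        NONE = auto()
        DEPENDENCY_AWARE = auto()

    def select_cache_type(cache_lru: int, cache_none: bool) -> CacheType:
        # An LRU size takes precedence; the cache-none option now really disables caching.
        if cache_lru > 0:
            return CacheType.LRU
        if cache_none:
            return CacheType.NONE
        return CacheType.CLASSIC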
@@ -2,7 +2,8 @@
 # LICENSE is in incl_licenses directory.
 
 import torch
-from torch import nn, sin, pow
+from torch import nn
+from torch import sin, pow # pylint: disable=no-name-in-module
 from torch.nn import Parameter
 from ....model_management import cast_to
 
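pylint raises no-name-in-module for "from torch import sin, pow" because those names live in torch's C extension, which static analysis cannot inspect. Splitting the import isolates the disable comment to the one offending line. An alternative that needs no suppression is attribute access through the module, sketched here:

    import torch

    # torch.sin / torch.pow resolve at runtime, so pylint has nothing to flag.
    x = torch.linspace(0.0, 1.0, steps=4)
    y = torch.sin(x)
    z = torch.pow(x, 2)
    print(y, z)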
@@ -9,6 +9,7 @@ from .distributions import DiagonalGaussianDistribution
 
 from ....ops import disable_weight_init as ops
 
+from ....model_management import cast_to
 
 DATA_MEAN_80D = [
     -1.6058, -1.3676, -1.2520, -1.2453, -1.2078, -1.2224, -1.2419, -1.2439, -1.2922, -1.2927,
@@ -126,10 +127,10 @@ class VAE(nn.Module):
         return dec
 
     def normalize(self, x: torch.Tensor) -> torch.Tensor:
-        return (x - comfy.model_management.cast_to(self.data_mean, dtype=x.dtype, device=x.device)) / comfy.model_management.cast_to(self.data_std, dtype=x.dtype, device=x.device)
+        return (x - cast_to(self.data_mean, dtype=x.dtype, device=x.device)) / cast_to(self.data_std, dtype=x.dtype, device=x.device)
 
     def unnormalize(self, x: torch.Tensor) -> torch.Tensor:
-        return x * comfy.model_management.cast_to(self.data_std, dtype=x.dtype, device=x.device) + comfy.model_management.cast_to(self.data_mean, dtype=x.dtype, device=x.device)
+        return x * cast_to(self.data_std, dtype=x.dtype, device=x.device) + cast_to(self.data_mean, dtype=x.dtype, device=x.device)
 
     def forward(
         self,
@@ -142,7 +143,9 @@ class VAE(nn.Module):
 
         posterior = self.encode(x, normalize=normalize)
         if sample_posterior:
-            z = posterior.sample(rng)
+            raise RuntimeError("error in implementation, posterior doesn't accept this arg")
+            # todo: fix this in upstream?
+            # z = posterior.sample(rng)
         else:
             z = posterior.mode()
         dec = self.decode(z, unnormalize=unnormalize)
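The VAE hunks swap fully qualified comfy.model_management.cast_to calls for the cast_to name imported at module level (added two hunks up), which is what lets pylint verify the reference. Functionally, normalize and unnormalize remain exact inverses. A small self-contained check, with made-up statistics standing in for the model's per-dimension mean/std buffers (DATA_MEAN_80D and its counterpart):

    import torch

    data_mean = torch.tensor([0.5, -0.25])
    data_std = torch.tensor([2.0, 0.5])

    def normalize(x: torch.Tensor) -> torch.Tensor:
        return (x - data_mean.to(x.dtype)) / data_std.to(x.dtype)

    def unnormalize(x: torch.Tensor) -> torch.Tensor:
        return x * data_std.to(x.dtype) + data_mean.to(x.dtype)

    x = torch.randn(3, 2)
    # Round-tripping through both functions reproduces the input.
    assert torch.allclose(unnormalize(normalize(x)), x, atol=1e-6)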
@@ -40,6 +40,10 @@ def get_filename_list(folder_name: str) -> Sequence[str]:
     return get_filename_list_with_downloadable(folder_name)
 
 
+def get_folder_paths(*args, **kwargs):
+    return folder_paths.get_folder_paths(*args, **kwargs)
+
+
 def get_filename_list_with_downloadable(folder_name: str, known_files: Optional[List[Downloadable] | KnownDownloadables] = None) -> DownloadableFileList | list[str]:
     if known_files is None:
         known_files = _get_known_models_for_folder_name(folder_name)
@@ -156,7 +156,7 @@ class LowVramPatch:
         intermediate_dtype = torch.float32
         out = lora.calculate_weight(self.patches[self.key], weight.to(intermediate_dtype), self.key, intermediate_dtype=intermediate_dtype)
         if self.set_func is None:
-            return float.stochastic_rounding(out, weight.dtype, seed=string_to_seed(self.key))
+            return stochastic_rounding(out, weight.dtype, seed=string_to_seed(self.key))
         else:
             return self.set_func(out, seed=string_to_seed(self.key), return_weight=True)
 
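Here the return now calls the module-level stochastic_rounding directly instead of going through a float. attribute path that pylint could not resolve. For readers unfamiliar with the technique: stochastic rounding rounds up with probability equal to the fractional remainder, so the quantization error is zero in expectation. An illustrative toy version (not comfy's implementation, which rounds to low-precision dtypes rather than integers):

    import torch

    def stochastic_round(x: torch.Tensor, seed: int) -> torch.Tensor:
        """Round to integers, rounding up with probability equal to the fractional part."""
        gen = torch.Generator().manual_seed(seed)
        floor = torch.floor(x)
        frac = x - floor
        return floor + (torch.rand(x.shape, generator=gen) < frac).to(x.dtype)

    x = torch.full((10000,), 0.25)
    # The mean of the rounded values is close to 0.25, i.e. no systematic bias.
    print(stochastic_round(x, seed=0).mean())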
@@ -77,11 +77,11 @@ except Exception as exc_info:
 
 NVIDIA_MEMORY_CONV_BUG_WORKAROUND = False
 try:
-    if comfy.model_management.is_nvidia():
-        if torch.backends.cudnn.version() >= 91002 and comfy.model_management.torch_version_numeric >= (2, 9) and comfy.model_management.torch_version_numeric <= (2, 10):
+    if model_management.is_nvidia():
+        if torch.backends.cudnn.version() >= 91002 and model_management.torch_version_numeric >= (2, 9) and model_management.torch_version_numeric <= (2, 10):
             # TODO: change upper bound version once it's fixed'
             NVIDIA_MEMORY_CONV_BUG_WORKAROUND = True
-            logging.info("working around nvidia conv3d memory bug.")
+            logger.info("working around nvidia conv3d memory bug.")
 except:
     pass
 
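Two renames in this hunk: the module is referenced as model_management, matching how this file imports it, and the module-level logger replaces the root logging call. The bare "except: pass" survives this pass, even though pylint normally flags it too (bare-except). The version gate relies on Python's lexicographic tuple comparison, which is worth spelling out since (2, 9) <= v <= (2, 10) quietly covers patch releases:

    # Tuple comparison is elementwise, then shorter-is-smaller on a tie:
    assert (2, 9) <= (2, 9, 1) <= (2, 10)   # torch 2.9.1 is inside the gate
    assert not ((2, 9) <= (2, 8, 7))        # torch 2.8.7 is outside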
@@ -324,7 +324,7 @@ def _calc_cond_batch(model: BaseModel, conds, x_in: torch.Tensor, timestep: torc
             copy_dict1=False)
 
         if patches is not None:
-            transformer_options["patches"] = comfy.patcher_extension.merge_nested_dicts(
+            transformer_options["patches"] = patcher_extension.merge_nested_dicts(
                 transformer_options.get("patches", {}),
                 patches
             )
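Same pattern again: comfy.patcher_extension becomes the directly imported patcher_extension. For context, a hedged sketch of what a nested-dict merge of this shape typically does; the recursion and the tie-breaking rule below are assumptions, not taken from comfy's source:

    def merge_nested_dicts(a: dict, b: dict) -> dict:
        """Merge b into a copy of a; nested dicts merge recursively, b wins ties."""
        out = dict(a)
        for key, value in b.items():
            if isinstance(value, dict) and isinstance(out.get(key), dict):
                out[key] = merge_nested_dicts(out[key], value)
            else:
                out[key] = value
        return out

    assert merge_nested_dicts({"p": {"x": 1}}, {"p": {"y": 2}}) == {"p": {"x": 1, "y": 2}}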
@@ -1,4 +1,4 @@
-from .llama import Gemma2_2B
+from .llama import Gemma2_2B, Gemma3_4B
 from .spiece_tokenizer import SPieceTokenizer
 from .. import sd1_clip
 
@@ -40,7 +40,7 @@ class Gemma2_2BModel(sd1_clip.SDClipModel):
 
 class Gemma3_4BModel(sd1_clip.SDClipModel):
     def __init__(self, device="cpu", layer="hidden", layer_idx=-2, dtype=None, attention_mask=True, model_options={}):
-        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 2, "pad": 0}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Gemma3_4B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
+        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 2, "pad": 0}, layer_norm_hidden_state=False, model_class=Gemma3_4B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
 
 class LuminaModel(sd1_clip.SD1ClipModel):
     def __init__(self, device="cpu", dtype=None, model_options=None, name="gemma2_2b", clip_model=Gemma2_2BModel):
@@ -51,6 +51,7 @@ class LuminaModel(sd1_clip.SD1ClipModel):
 
 
 def te(dtype_llama=None, llama_scaled_fp8=None, model_type="gemma2_2b"):
+    model = None
     if model_type == "gemma2_2b":
         model = Gemma2_2BModel
     elif model_type == "gemma3_4b":
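Initializing model = None ahead of the if/elif chain is the conventional cure for pylint's possibly-used-before-assignment: every path now defines the name, and an unknown model_type can be caught explicitly instead of surfacing as a NameError further down. A sketch of the shape, with model names stringified for brevity and the explicit ValueError an assumption beyond what this diff shows:

    def pick_model(model_type: str):
        model = None
        if model_type == "gemma2_2b":
            model = "Gemma2_2BModel"
        elif model_type == "gemma3_4b":
            model = "Gemma3_4BModel"
        if model is None:
            raise ValueError(f"unknown model_type: {model_type}")
        return model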
@@ -1,5 +1,5 @@
 from typing_extensions import override
-from comfy.common import MAX_RESOLUTION
+from comfy.nodes.common import MAX_RESOLUTION
 from comfy_api.latest import ComfyExtension, io
 
 
@@ -7,7 +7,7 @@ from skimage import exposure
 import comfy.utils
 from comfy.component_model.tensor_types import RGBImageBatch, ImageBatch, MaskBatch
-from comfy.nodes.package_typing import CustomNode
 
+from comfy_api.latest import io
 
 def resize_mask(mask, shape):
     return torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[0], shape[1]), mode="bilinear").squeeze(1)
@@ -29,7 +29,8 @@ class CLIPTextEncodeFlux(io.ComfyNode):
         tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]
 
         return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"guidance": guidance}))
-    encode = execute # TODO: remove
+
+    encode = execute # TODO: remove
 
 
 class FluxGuidance(io.ComfyNode):
@@ -75,7 +76,8 @@ class FluxDisableGuidance(io.ComfyNode):
     def execute(cls, conditioning) -> io.NodeOutput:
         c = node_helpers.conditioning_set_values(conditioning, {"guidance": None})
         return io.NodeOutput(c)
-    append = execute # TODO: remove
+
+    append = execute # TODO: remove
 
 
 class FluxKontextImageScale(io.ComfyNode):
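The two flux hunks appear to only insert a blank line before the legacy aliases. encode = execute and append = execute are class-body assignments that bind a second name to the same classmethod, so old call sites keep working during the io-API migration. A tiny demonstration of why the alias behaves identically:

    class Node:
        @classmethod
        def execute(cls, value):
            return value * 2

        encode = execute  # legacy alias: same descriptor under a second name

    assert Node.encode(3) == Node.execute(3) == 6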
@@ -4,6 +4,8 @@ import av
 import numpy as np
 import torch
 
+from io import BytesIO
+
 import comfy.model_management
 import comfy.model_sampling
 import comfy.utils
@@ -11,6 +13,7 @@ from comfy import node_helpers
 from comfy.ldm.lightricks.symmetric_patchifier import SymmetricPatchifier, latent_to_pixel_coords
 from comfy.nodes import base_nodes as nodes
 from comfy_api.latest import ComfyExtension, io
+from typing_extensions import override
 
 class EmptyLTXVLatentVideo(io.ComfyNode):
     @classmethod
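The last two hunks only add imports (io.BytesIO and typing_extensions.override), presumably for names used further down in the file. @override is purely a static-analysis marker: a type checker errors out when the decorated method does not actually override anything in a base class, which is exactly the kind of rename this commit is cleaning up after. A minimal example:

    from typing_extensions import override

    class Base:
        def name(self) -> str:
            return "base"

    class Child(Base):
        @override  # a checker flags this if Base stops defining name()
        def name(self) -> str:
            return "child"

    print(Child().name())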