Fix more lint issues

This commit is contained in:
Souyama 2026-01-08 01:09:44 +05:30
parent b6bd41772a
commit b3987fe583
45 changed files with 523 additions and 338 deletions
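The pattern throughout this commit is the same: logging calls that eagerly built their message with str.format() or an f-string now pass a %-style format string plus arguments, deferring interpolation until the record is actually emitted. This is the pattern pylint's logging-not-lazy, logging-format-interpolation and logging-fstring-interpolation checks flag. A minimal sketch of the conversion, with illustrative names only:

import logging

value = 42
# flagged: the message is formatted even when the log level filters it out
logging.info("value is {}".format(value))
logging.info(f"value is {value}")
# preferred: interpolation happens lazily, only if the record is emitted
logging.info("value is %s", value)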

View File

@ -87,7 +87,7 @@ ________________________________________________________________________
""".strip()
)
else:
logging.info("ComfyUI frontend version: {}".format(frontend_version_str))
logging.info("ComfyUI frontend version: %s", frontend_version_str)
except Exception as e:
logging.error("Failed to check frontend version: %s", e)
@ -390,12 +390,12 @@ comfyui-workflow-templates is not installed.
)
if os.path.exists(expected_path):
logging.info(
f"Using existing copy of specific frontend version tag: {repo_owner}/{repo_name}@{version}"
"Using existing copy of specific frontend version tag: %s/%s@%s", repo_owner, repo_name, version
)
return expected_path
logging.info(
f"Initializing frontend: {repo_owner}/{repo_name}@{version}, requesting version details from GitHub..."
"Initializing frontend: %s/%s@%s, requesting version details from GitHub...", repo_owner, repo_name, version
)
provider = provider or FrontEndProvider(repo_owner, repo_name)

View File

@ -84,8 +84,8 @@ def load_audio_encoder_from_sd(sd, prefix=""):
audio_encoder = AudioEncoderModel(config)
m, u = audio_encoder.load_sd(sd)
if len(m) > 0:
logging.warning("missing audio encoder: {}".format(m))
logging.warning("missing audio encoder: %s", m)
if len(u) > 0:
logging.warning("unexpected audio encoder: {}".format(u))
logging.warning("unexpected audio encoder: %s", u)
return audio_encoder

View File

@ -130,7 +130,7 @@ def load_clipvision_from_sd(sd, prefix="", convert_keys=False):
clip = ClipVisionModel(json_config)
m, u = clip.load_sd(sd)
if len(m) > 0:
logging.warning("missing clip vision: {}".format(m))
logging.warning("missing clip vision: %s", m)
u = set(u)
keys = list(sd.keys())
for k in keys:

View File

@ -442,10 +442,10 @@ def controlnet_load_state_dict(control_model, sd):
missing, unexpected = control_model.load_state_dict(sd, strict=False)
if len(missing) > 0:
logging.warning("missing controlnet keys: {}".format(missing))
logging.warning("missing controlnet keys: %s", missing)
if len(unexpected) > 0:
logging.debug("unexpected controlnet keys: {}".format(unexpected))
logging.debug("unexpected controlnet keys: %s", unexpected)
return control_model
@ -668,7 +668,7 @@ def load_controlnet_state_dict(state_dict, model=None, model_options={}):
leftover_keys = controlnet_data.keys()
if len(leftover_keys) > 0:
logging.warning("leftover keys: {}".format(leftover_keys))
logging.warning("leftover keys: %s", leftover_keys)
controlnet_data = new_sd
elif "controlnet_blocks.0.weight" in controlnet_data:
if "double_blocks.0.img_attn.norm.key_norm.scale" in controlnet_data:
@ -753,10 +753,10 @@ def load_controlnet_state_dict(state_dict, model=None, model_options={}):
missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
if len(missing) > 0:
logging.warning("missing controlnet keys: {}".format(missing))
logging.warning("missing controlnet keys: %s", missing)
if len(unexpected) > 0:
logging.debug("unexpected controlnet keys: {}".format(unexpected))
logging.debug("unexpected controlnet keys: %s", unexpected)
global_average_pooling = model_options.get("global_average_pooling", False)
control = ControlNet(control_model, global_average_pooling=global_average_pooling, load_device=load_device, manual_cast_dtype=manual_cast_dtype)
@ -771,7 +771,7 @@ def load_controlnet(ckpt_path, model=None, model_options={}):
cnet = load_controlnet_state_dict(comfy.utils.load_torch_file(ckpt_path, safe_load=True), model=model, model_options=model_options)
if cnet is None:
logging.error("error checkpoint does not contain controlnet or t2i adapter data {}".format(ckpt_path))
logging.error("error checkpoint does not contain controlnet or t2i adapter data %s", ckpt_path)
return cnet
class T2IAdapter(ControlBase):
@ -876,9 +876,9 @@ def load_t2i_adapter(t2i_data, model_options={}): #TODO: model_options
missing, unexpected = model_ad.load_state_dict(t2i_data)
if len(missing) > 0:
logging.warning("t2i missing {}".format(missing))
logging.warning("t2i missing", missing)
if len(unexpected) > 0:
logging.debug("t2i unexpected {}".format(unexpected))
logging.debug("t2i unexpected", unexpected)
return T2IAdapter(model_ad, model_ad.input_channels, compression_ratio, upscale_algorithm)
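With lazy logging, every extra argument needs a matching placeholder; otherwise the standard handler reports a "--- Logging error ---" traceback at emit time and the intended message is lost. A small sketch using the t2i message from this file and an illustrative value:

import logging

missing = ["key.a", "key.b"]
logging.warning("t2i missing", missing)     # no placeholder: formatting error, message lost
logging.warning("t2i missing %s", missing)  # placeholder consumes the argument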

View File

@ -295,7 +295,7 @@ class TimestepEmbedding(nn.Module):
def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False, weight_args={}, operations=None):
super().__init__()
logging.debug(
f"Using AdaLN LoRA Flag: {use_adaln_lora}. We enable bias if no AdaLN LoRA for backward compatibility."
"Using AdaLN LoRA Flag: %s. We enable bias if no AdaLN LoRA for backward compatibility.", use_adaln_lora
)
self.linear_1 = operations.Linear(in_features, out_features, bias=not use_adaln_lora, **weight_args)
self.activation = nn.SiLU()

View File

@ -632,9 +632,7 @@ class DecoderBase(nn.Module):
curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1)
self.z_shape = (1, z_channels, curr_res, curr_res)
logging.debug(
"Working with z of shape {} = {} dimensions.".format(
self.z_shape, np.prod(self.z_shape)
)
"Working with z of shape %s = %d dimensions.", self.z_shape, np.prod(self.z_shape)
)
# z to block_in
@ -929,9 +927,7 @@ class DecoderFactorized(nn.Module):
curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1)
self.z_shape = (1, z_channels, curr_res, curr_res)
logging.debug(
"Working with z of shape {} = {} dimensions.".format(
self.z_shape, np.prod(self.z_shape)
)
"Working with z of shape %s = %d dimensions.", self.z_shape, np.prod(self.z_shape)
)
# z to block_in

View File

@ -118,13 +118,20 @@ class Attention(nn.Module):
operations=None,
) -> None:
super().__init__()
logging.debug(
f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
f"{n_heads} heads with a dimension of {head_dim}."
"Setting up %s. Query dim is %d, context_dim is %d and using "
"%d heads with a dimension of %d.",
self.__class__.__name__,
query_dim,
context_dim,
n_heads,
head_dim,
)
self.is_selfattn = context_dim is None # self attention
context_dim = query_dim if context_dim is None else context_dim
inner_dim = head_dim * n_heads
self.n_heads = n_heads
@ -226,7 +233,7 @@ class TimestepEmbedding(nn.Module):
def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False, device=None, dtype=None, operations=None):
super().__init__()
logging.debug(
f"Using AdaLN LoRA Flag: {use_adaln_lora}. We enable bias if no AdaLN LoRA for backward compatibility."
"Using AdaLN LoRA Flag: %s. We enable bias if no AdaLN LoRA for backward compatibility.", use_adaln_lora
)
self.in_dim = in_features
self.out_dim = out_features

View File

@ -92,7 +92,7 @@ class CausalContinuousVideoTokenizer(nn.Module):
num_parameters = sum(param.numel() for param in self.parameters())
logging.debug("model=%s, num_parameters=%d", self.name, num_parameters)
logging.debug(
f"z_channels={z_channels}, latent_channels={self.latent_channels}."
"z_channels=%d, latent_channels=%d.", z_channels, self.latent_channels
)
latent_temporal_chunk = 16

View File

@ -390,7 +390,7 @@ def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape
steps *= 2
if steps > 64:
raise e
logging.warning("out of memory error, increasing steps and trying again {}".format(steps))
logging.warning("out of memory error, increasing steps and trying again", steps)
else:
raise e
@ -556,7 +556,7 @@ def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=
try:
out = sageattn(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout)
except Exception as e:
logging.error("Error running sage attention: {}, using pytorch attention instead.".format(e))
logging.error("Error running sage attention: %s, using pytorch attention instead.", e)
exception_fallback = True
if exception_fallback:
if tensor_layout == "NHD":

View File

@ -272,7 +272,7 @@ def slice_attention(q, k, v):
steps *= 2
if steps > 128:
raise e
logging.warning("out of memory error, increasing steps and trying again {}".format(steps))
logging.warning("out of memory error, increasing steps and trying again %s", steps)
return r1
@ -725,8 +725,7 @@ class Decoder(nn.Module):
block_in = ch*ch_mult[self.num_resolutions-1]
curr_res = resolution // 2**(self.num_resolutions-1)
self.z_shape = (1,z_channels,curr_res,curr_res)
logging.debug("Working with z of shape {} = {} dimensions.".format(
self.z_shape, np.prod(self.z_shape)))
logging.debug("Working with z of shape %s = %d dimensions.", self.z_shape, np.prod(self.z_shape))
# z to block_in
self.conv_in = conv_op(z_channels,

View File

@ -369,7 +369,7 @@ def apply_control(h, control, name):
try:
h += ctrl
except:
logging.warning("warning control could not be applied {} {}".format(h.shape, ctrl.shape))
logging.warning("warning control could not be applied %s %s", h.shape, ctrl.shape)
return h
class UNetModel(nn.Module):

View File

@ -144,8 +144,7 @@ def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
if verbose:
logging.info("Selected alphas for ddim sampler: a_t: %s; a_(t-1): %s", alphas, alphas_prev)
logging.info(f'For the chosen value of eta, which is {eta}, '
f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
logging.info("For the chosen value of eta, which is %s, this results in the following sigma_t schedule for ddim sampler %s", eta, sigmas)
return sigmas, alphas, alphas_prev

View File

@ -90,7 +90,7 @@ def load_lora(lora, to_load, log_missing=True):
if log_missing:
for x in lora.keys():
if x not in loaded_keys:
logging.warning("lora key not loaded: {}".format(x))
logging.warning("lora key not loaded: %s", x)
return patch_dict
@ -390,7 +390,7 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, ori
if isinstance(v, weight_adapter.WeightAdapterBase):
output = v.calculate_weight(weight, key, strength, strength_model, offset, function, intermediate_dtype, original_weights)
if output is None:
logging.warning("Calculate Weight Failed: {} {}".format(v.name, key))
logging.warning("Calculate Weight Failed: %s %s", v.name, key)
else:
weight = output
if old_weight is not None:
@ -408,12 +408,12 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, ori
# An extra flag to pad the weight if the diff's shape is larger than the weight
do_pad_weight = len(v) > 1 and v[1]['pad_weight']
if do_pad_weight and diff.shape != weight.shape:
logging.info("Pad weight {} from {} to shape: {}".format(key, weight.shape, diff.shape))
logging.info("Pad weight %s from %s to shape: %s", key, weight.shape, diff.shape)
weight = pad_tensor_to_shape(weight, diff.shape)
if strength != 0.0:
if diff.shape != weight.shape:
logging.warning("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, diff.shape, weight.shape))
logging.warning("WARNING SHAPE MISMATCH %s WEIGHT NOT MERGED %s != %s", key, diff.shape, weight.shape)
else:
weight += function(strength * comfy.model_management.cast_to_device(diff, weight.device, weight.dtype))
elif patch_type == "set":
@ -424,7 +424,7 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, ori
comfy.model_management.cast_to_device(original_weights[key][0][0], weight.device, intermediate_dtype)
weight += function(strength * comfy.model_management.cast_to_device(diff_weight, weight.device, weight.dtype))
else:
logging.warning("patch type not recognized {} {}".format(patch_type, key))
logging.warning("patch type not recognized %s %s", patch_type, key)
if old_weight is not None:
weight = old_weight

View File

@ -144,7 +144,7 @@ class BaseModel(torch.nn.Module):
if comfy.model_management.force_channels_last():
self.diffusion_model.to(memory_format=torch.channels_last)
logging.debug("using channels last mode for diffusion model")
logging.info("model weight dtype {}, manual cast: {}".format(self.get_dtype(), self.manual_cast_dtype))
logging.info("model weight dtype %s, manual cast: %s", self.get_dtype(), self.manual_cast_dtype)
self.model_type = model_type
self.model_sampling = model_sampling(model_config, model_type)
@ -153,8 +153,8 @@ class BaseModel(torch.nn.Module):
self.adm_channels = 0
self.concat_keys = ()
logging.info("model_type {}".format(model_type.name))
logging.debug("adm {}".format(self.adm_channels))
logging.info("model_type %s", model_type.name)
logging.debug("adm %s", self.adm_channels)
self.memory_usage_factor = model_config.memory_usage_factor
self.memory_usage_factor_conds = ()
self.memory_usage_shape_process = {}
@ -308,10 +308,10 @@ class BaseModel(torch.nn.Module):
to_load = self.model_config.process_unet_state_dict(to_load)
m, u = self.diffusion_model.load_state_dict(to_load, strict=False)
if len(m) > 0:
logging.warning("unet missing: {}".format(m))
logging.warning("unet missing: %s", m)
if len(u) > 0:
logging.warning("unet unexpected: {}".format(u))
logging.warning("unet unexpected: %s", u)
del to_load
return self

View File

@ -775,7 +775,7 @@ def model_config_from_unet_config(unet_config, state_dict=None):
if model_config.matches(unet_config, state_dict):
return model_config(unet_config)
logging.error("no match {}".format(unet_config))
logging.error("no match %s", unet_config)
return None
def model_config_from_unet(state_dict, unet_key_prefix, use_base_if_no_match=False, metadata=None):

View File

@ -98,7 +98,8 @@ if args.directml is not None:
directml_device = torch_directml.device()
else:
directml_device = torch_directml.device(device_index)
logging.info("Using directml with device: {}".format(torch_directml.device_name(device_index)))
logging.info("Using directml with device: %s", torch_directml.device_name(device_index))
# torch_directml.disable_tiled_resources(True)
lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default.
@ -238,13 +239,13 @@ def mac_version():
total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
total_ram = psutil.virtual_memory().total / (1024 * 1024)
logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
logging.info("Total VRAM %0.0f MB, total RAM %0.0f MB", total_vram, total_ram)
try:
logging.info("pytorch version: {}".format(torch_version))
logging.info("pytorch version: %s", torch_version)
mac_ver = mac_version()
if mac_ver is not None:
logging.info("Mac Version {}".format(mac_ver))
logging.info("Mac Version %s", mac_ver)
except:
pass
@ -268,7 +269,7 @@ else:
pass
try:
XFORMERS_VERSION = xformers.version.__version__
logging.info("xformers version: {}".format(XFORMERS_VERSION))
logging.info("xformers version: %s", XFORMERS_VERSION)
if XFORMERS_VERSION.startswith("0.0.18"):
logging.warning("\nWARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.")
logging.warning("Please downgrade or upgrade xformers to a different version.\n")
@ -349,8 +350,8 @@ try:
except:
rocm_version = (6, -1)
logging.info("AMD arch: {}".format(arch))
logging.info("ROCm version: {}".format(rocm_version))
logging.info("AMD arch: %s", arch)
logging.info("ROCm version: %s", rocm_version)
if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
if importlib.util.find_spec('triton') is not None: # AMD efficient attention implementation depends on triton. TODO: better way of detecting if it's compiled in or not.
if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much
@ -444,7 +445,7 @@ def get_torch_device_name(device):
return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device))
try:
logging.info("Device: {}".format(get_torch_device_name(get_torch_device())))
logging.info("Device: %s", get_torch_device_name(get_torch_device()))
except:
logging.warning("Could not pick default device.")
@ -573,7 +574,7 @@ if WINDOWS:
if args.reserve_vram is not None:
EXTRA_RESERVED_VRAM = args.reserve_vram * 1024 * 1024 * 1024
logging.debug("Reserving {}MB vram for other applications.".format(EXTRA_RESERVED_VRAM / (1024 * 1024)))
logging.debug("Reserving %0.2f MB of VRAM as per user request.", EXTRA_RESERVED_VRAM / (1024 * 1024))
def extra_reserved_memory():
return EXTRA_RESERVED_VRAM
@ -678,7 +679,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
free_mem = get_free_memory(device)
if free_mem < minimum_memory_required:
models_l = free_memory(minimum_memory_required, device)
logging.info("{} models unloaded.".format(len(models_l)))
logging.info("%d models unloaded.", len(models_l))
for loaded_model in models_to_load:
model = loaded_model.model
@ -724,7 +725,7 @@ def cleanup_models_gc():
for i in range(len(current_loaded_models)):
cur = current_loaded_models[i]
if cur.is_dead():
logging.info("Potential memory leak detected with model {}, doing a full garbage collect, for maximum performance avoid circular references in the model code.".format(cur.real_model().__class__.__name__))
logging.info("Potential memory leak detected with model %s, doing a full garbage collect, for maximum performance avoid circular references in the model code.", cur.real_model().__class__.__name__)
do_gc = True
break
@ -735,7 +736,7 @@ def cleanup_models_gc():
for i in range(len(current_loaded_models)):
cur = current_loaded_models[i]
if cur.is_dead():
logging.warning("WARNING, memory leak with model {}. Please make sure it is not being referenced from somewhere.".format(cur.real_model().__class__.__name__))
logging.warning("WARNING, memory leak with model %s. Please make sure it is not being referenced from somewhere.", cur.real_model().__class__.__name__)
@ -1027,7 +1028,7 @@ if args.disable_async_offload:
NUM_STREAMS = 0
if NUM_STREAMS > 0:
logging.info("Using async weight offloading with {} streams".format(NUM_STREAMS))
logging.info("Using async weight offloading with %d streams", NUM_STREAMS)
def current_stream(device):
if device is None:
@ -1122,7 +1123,7 @@ if not args.disable_pinned_memory:
MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50%
else:
MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95
logging.info("Enabled pinned memory {}".format(MAX_PINNED_MEMORY // (1024 * 1024)))
logging.info("Enabled pinned memory. %0.2f MB max", MAX_PINNED_MEMORY / (1024 * 1024))
PINNING_ALLOWED_TYPES = set(["Parameter", "QuantizedTensor"])

View File

@ -778,7 +778,7 @@ class ModelPatcher:
if comfy.model_management.is_device_cuda(device_to):
torch.cuda.synchronize()
logging.debug("lowvram: loaded module regularly {} {}".format(n, m))
logging.debug("lowvram: loaded module regularly %s to %s", n, m)
m.comfy_patched_weights = True
for x in load_completely:
@ -791,10 +791,10 @@ class ModelPatcher:
self.pin_weight_to_device("{}.{}".format(n, param))
if lowvram_counter > 0:
logging.info("loaded partially; {:.2f} MB usable, {:.2f} MB loaded, {:.2f} MB offloaded, {:.2f} MB buffer reserved, lowvram patches: {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), lowvram_mem_counter / (1024 * 1024), offload_buffer / (1024 * 1024), patch_counter))
logging.info("loaded partially; %.2f MB usable, %.2f MB loaded, %.2f MB offloaded, %.2f MB buffer reserved, lowvram patches: %d", lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), lowvram_mem_counter / (1024 * 1024), offload_buffer / (1024 * 1024), patch_counter)
self.model.model_lowvram = True
else:
logging.info("loaded completely; {:.2f} MB usable, {:.2f} MB loaded, full load: {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), full_load))
logging.info("loaded completely; %.2f MB usable, %.2f MB loaded, full load: %s", lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), full_load)
self.model.model_lowvram = False
if full_load:
self.model.to(device_to)
@ -941,7 +941,7 @@ class ModelPatcher:
offload_buffer = max(offload_buffer, potential_offload)
offload_weight_factor.append(module_mem)
offload_weight_factor.pop(0)
logging.debug("freed {}".format(n))
logging.debug("freed %s", n)
for param in params:
self.pin_weight_to_device("{}.{}".format(n, param))
@ -951,7 +951,7 @@ class ModelPatcher:
self.model.lowvram_patch_counter += patch_counter
self.model.model_loaded_weight_memory -= memory_freed
self.model.model_offload_buffer_memory = offload_buffer
logging.info("Unloaded partially: {:.2f} MB freed, {:.2f} MB remains loaded, {:.2f} MB buffer reserved, lowvram patches: {}".format(memory_freed / (1024 * 1024), self.model.model_loaded_weight_memory / (1024 * 1024), offload_buffer / (1024 * 1024), self.model.lowvram_patch_counter))
logging.info("Unloaded partially: %.2f MB freed, %.2f MB remains loaded, %.2f MB buffer reserved, lowvram patches: %d", memory_freed / (1024 * 1024), self.model.model_loaded_weight_memory / (1024 * 1024), offload_buffer / (1024 * 1024), self.model.lowvram_patch_counter)
return memory_freed
def partially_load(self, device_to, extra_memory=0, force_patch_weights=False):

View File

@ -455,7 +455,7 @@ class fp8_ops(manual_cast):
if out is not None:
return out
except Exception as e:
logging.info("Exception during fp8 op: {}".format(e))
logging.info("Exception during fp8 op: %s", str(e))
weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True)
x = torch.nn.functional.linear(input, weight, bias)

View File

@ -95,7 +95,7 @@ def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
k1 = set(k1)
for x in loaded:
if (x not in k) and (x not in k1):
logging.warning("NOT LOADED {}".format(x))
logging.warning("NOT LOADED %s", x)
return (new_modelpatcher, new_clip)
@ -139,27 +139,27 @@ class CLIP:
for c in state_dict:
m, u = self.load_sd(c)
if len(m) > 0:
logging.warning("clip missing: {}".format(m))
logging.warning("clip missing: %s", m)
if len(u) > 0:
logging.debug("clip unexpected: {}".format(u))
logging.debug("clip unexpected: %s", u)
else:
m, u = self.load_sd(state_dict, full_model=True)
if len(m) > 0:
m_filter = list(filter(lambda a: ".logit_scale" not in a and ".transformer.text_projection.weight" not in a, m))
if len(m_filter) > 0:
logging.warning("clip missing: {}".format(m))
logging.warning("clip missing: %s", m)
else:
logging.debug("clip missing: {}".format(m))
logging.debug("clip missing: %s", m)
if len(u) > 0:
logging.debug("clip unexpected {}:".format(u))
logging.debug("clip unexpected %s:", u)
if params['device'] == load_device:
model_management.load_models_gpu([self.patcher], force_full_load=True)
self.layer_idx = None
self.use_clip_schedule = False
logging.info("CLIP/text encoder model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype))
logging.info("CLIP/text encoder model load device: %s, offload device: %s, current: %s, dtype: %s", load_device, offload_device, params['device'], dtype)
self.tokenizer_options = {}
def clone(self):
@ -664,10 +664,10 @@ class VAE:
m, u = self.first_stage_model.load_state_dict(sd, strict=False)
if len(m) > 0:
logging.warning("Missing VAE keys {}".format(m))
logging.warning("Missing VAE keys %s", str(m))
if len(u) > 0:
logging.debug("Leftover VAE keys {}".format(u))
logging.debug("Leftover VAE keys %s", str(u))
if device is None:
device = model_management.vae_device()
@ -680,7 +680,7 @@ class VAE:
self.output_device = model_management.intermediate_device()
self.patcher = comfy.model_patcher.ModelPatcher(self.first_stage_model, load_device=self.device, offload_device=offload_device)
logging.info("VAE load device: {}, offload device: {}, dtype: {}".format(self.device, offload_device, self.vae_dtype))
logging.info("VAE load device: %s, offload device: %s, dtype: %s", self.device, offload_device, self.vae_dtype)
self.model_size()
def model_size(self):
@ -1440,7 +1440,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c
left_over = sd.keys()
if len(left_over) > 0:
logging.debug("left over keys: {}".format(left_over))
logging.debug("left over keys: %s", left_over)
if output_model:
model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device())
@ -1510,7 +1510,7 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None):
if k in sd:
new_sd[diffusers_keys[k]] = sd.pop(k)
else:
logging.warning("{} {}".format(diffusers_keys[k], k))
logging.warning("%s %s", diffusers_keys[k], k)
offload_device = model_management.unet_offload_device()
unet_weight_dtype = list(model_config.supported_inference_dtypes)
@ -1539,7 +1539,7 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None):
model.load_model_weights(new_sd, "")
left_over = sd.keys()
if len(left_over) > 0:
logging.info("left over keys in diffusion model: {}".format(left_over))
logging.info("left over keys in diffusion model: %s", left_over)
return comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=offload_device)
@ -1547,7 +1547,7 @@ def load_diffusion_model(unet_path, model_options={}):
sd, metadata = comfy.utils.load_torch_file(unet_path, return_metadata=True)
model = load_diffusion_model_state_dict(sd, model_options=model_options, metadata=metadata)
if model is None:
logging.error("ERROR UNSUPPORTED DIFFUSION MODEL {}".format(unet_path))
logging.error("ERROR UNSUPPORTED DIFFUSION MODEL %s", unet_path)
raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(unet_path, model_detection_error_hint(unet_path, sd)))
return model

View File

@ -235,7 +235,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
else:
index += -1
pad_extra += emb_shape
logging.warning("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored {} != {}".format(emb.shape[-1], tokens_embed.shape[-1]))
logging.warning("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored %s != %s", emb.shape[-1], tokens_embed.shape[-1])
if pad_extra > 0:
padd_embed = self.transformer.get_input_embeddings()(torch.tensor([[self.special_tokens["pad"]] * pad_extra], device=device, dtype=torch.long), out_dtype=torch.float32)
@ -438,7 +438,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
except:
embed_out = safe_load_embed_zip(embed_path)
except Exception:
logging.warning("{}\n\nerror loading embedding, skipping loading: {}".format(traceback.format_exc(), embedding_name))
logging.warning("%s\n\nerror loading embedding, skipping loading: %s", traceback.format_exc(), embedding_name)
return None
if embed_out is None:

View File

@ -120,5 +120,5 @@ class BASE:
self.manual_cast_dtype = manual_cast_dtype
def __getattr__(self, name):
logging.warning("\nWARNING, you accessed {} from the model config object which doesn't exist. Please fix your code.\n".format(name))
logging.warning("\nWARNING, you accessed %s from the model config object which doesn't exist. Please fix your code.\n", name)
return None

View File

@ -80,7 +80,7 @@ class VoiceBpeTokenizer:
token_idx = self.encode(line, lang)
lyric_token_idx = lyric_token_idx + token_idx + [2]
except Exception as e:
logging.warning("tokenize error {} for line {} major_language {}".format(e, line, lang))
logging.warning("tokenize error %s for line %s major_language %s", e, line, lang)
return {"input_ids": lyric_token_idx}
@staticmethod

View File

@ -62,7 +62,7 @@ class HiDreamTEModel(torch.nn.Module):
else:
self.llama = None
logging.debug("Created HiDream text encoder with: clip_l {}, clip_g {}, t5xxl {}:{}, llama {}:{}".format(clip_l, clip_g, t5, dtype_t5, llama, dtype_llama))
logging.debug("Created HiDream text encoder with: clip_l %s, clip_g %s, t5xxl %s:%s, llama %s:%s", clip_l, clip_g, t5, dtype_t5, llama, dtype_llama)
def set_clip_options(self, options):
if self.clip_l is not None:

View File

@ -81,7 +81,7 @@ class SD3ClipModel(torch.nn.Module):
else:
self.t5xxl = None
logging.debug("Created SD3 text encoder with: clip_l {}, clip_g {}, t5xxl {}:{}".format(clip_l, clip_g, t5, dtype_t5))
logging.debug("Created SD3 text encoder with: clip_l %s, clip_g %s, t5xxl %s:%s", clip_l, clip_g, t5, dtype_t5)
def set_clip_options(self, options):
if self.clip_l is not None:

View File

@ -86,7 +86,7 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False):
if safe_load or ALWAYS_SAFE_LOAD:
pl_sd = torch.load(ckpt, map_location=device, weights_only=True, **torch_args)
else:
logging.warning("WARNING: loading {} unsafely, upgrade your pytorch to 2.4 or newer to load this file safely.".format(ckpt))
logging.warning("WARNING: loading %s unsafely, upgrade your pytorch to 2.4 or newer to load this file safely.", ckpt)
pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle)
if "state_dict" in pl_sd:
sd = pl_sd["state_dict"]

View File

@ -111,5 +111,5 @@ class BOFTAdapter(WeightAdapterBase):
else:
weight += function((strength * lora_diff).type(weight.dtype))
except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e))
logging.error("ERROR %s %s %s", self.name, key, e)
return weight

View File

@ -89,5 +89,5 @@ class GLoRAAdapter(WeightAdapterBase):
else:
weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e))
logging.error("ERROR %s %s %s", self.name, key, e)
return weight

View File

@ -228,5 +228,5 @@ class LoHaAdapter(WeightAdapterBase):
else:
weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e))
logging.error("ERROR %s %s %s", self.name, key, e)
return weight

View File

@ -216,5 +216,5 @@ class LoKrAdapter(WeightAdapterBase):
else:
weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e))
logging.error("ERROR %s %s %s", self.name, key, e)
return weight

View File

@ -208,5 +208,5 @@ class LoRAAdapter(WeightAdapterBase):
else:
weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e))
logging.error("ERROR %s %s %s", self.name, key, e)
return weight

View File

@ -157,5 +157,5 @@ class OFTAdapter(WeightAdapterBase):
else:
weight += function((strength * lora_diff).type(weight.dtype))
except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e))
logging.error("ERROR %s %s %s", self.name, key, e)
return weight

View File

@ -34,7 +34,7 @@ def generate_stubs_for_module(module_name: str) -> None:
logging.info("Generated stub file for %s", module_name)
else:
logging.warning(
f"Module {module_name} has ComfyAPISync but no ComfyAPI"
"Module %s has ComfyAPISync but no ComfyAPI", module_name
)
elif hasattr(module, "ComfyAPI"):
@ -49,7 +49,7 @@ def generate_stubs_for_module(module_name: str) -> None:
logging.info("Generated stub file for %s", module_name)
else:
logging.warning(
f"Module {module_name} does not export ComfyAPI or ComfyAPISync"
"Module %s does not export ComfyAPI or ComfyAPISync", module_name
)
except Exception as e:

View File

@ -282,7 +282,7 @@ class AsyncToSyncConverter:
setattr(self._async_instance, attr_name, async_instance)
except Exception as e:
logging.warning(
f"Failed to create instance for {attr_name}: {e}"
"Failed to create instance for %s: %s", attr_name, e
)
# Handle other instance attributes that might not be annotated
@ -981,7 +981,7 @@ class AsyncToSyncConverter:
except Exception as e:
# If stub generation fails, log the error but don't break the main functionality
logging.error(
f"Error generating stub file for {sync_class.__name__}: {str(e)}"
"Error generating stub file for %s: %s", sync_class.__name__, str(e)
)
import traceback

View File

@ -1002,7 +1002,7 @@ class ImageDeduplicationNode(ImageProcessingNode):
if similarity >= similarity_threshold:
is_duplicate = True
logging.info(
f"Image {i} is similar to image {j} (similarity: {similarity:.3f}), skipping"
"Image %d is similar to image %d (similarity: %.3f), skipping", i, j, similarity
)
break
@ -1012,7 +1012,7 @@ class ImageDeduplicationNode(ImageProcessingNode):
# Return only unique images
unique_images = [images[i] for i in keep_indices]
logging.info(
f"Deduplication: kept {len(unique_images)} out of {len(images)} images"
"Deduplication: kept %d out of %d images", len(unique_images), len(images)
)
return unique_images
@ -1086,7 +1086,7 @@ class ImageGridNode(ImageProcessingNode):
grid.paste(img, (x, y))
logging.info(
f"Created {columns}x{rows} grid with {num_images} images ({grid_width}x{grid_height})"
"Created %d x %d grid with %d images (%d x %d)", columns, rows, num_images, grid_width, grid_height
)
return pil_to_tensor(grid)
@ -1214,7 +1214,7 @@ class ResolutionBucket(io.ComfyNode):
output_conditions.append(bucket_data["conditions"])
logging.info(
f"Resolution bucket ({h}x{w}): {len(bucket_data['latents'])} samples"
"Resolution bucket (%d x %d): %d samples", h, w, len(bucket_data["latents"])
)
logging.info("Created %s resolution buckets from %s samples", len(buckets), len(flat_latents))
@ -1302,7 +1302,7 @@ class MakeTrainingDataset(io.ComfyNode):
conditioning_list.append(cond)
logging.info(
f"Created dataset with {len(latents_list)} latents and {len(conditioning_list)} conditioning."
"Created dataset with %d latents and %d conditioning.", len(latents_list), len(conditioning_list)
)
return io.NodeOutput(latents_list, conditioning_list)
@ -1369,7 +1369,7 @@ class SaveTrainingDataset(io.ComfyNode):
num_shards = (num_samples + shard_size - 1) // shard_size # Ceiling division
logging.info(
f"Saving {num_samples} samples to {num_shards} shards in {output_dir}..."
"Saving %d samples to %d shards in %s...", num_samples, num_shards, output_dir
)
# Save data in shards
@ -1391,7 +1391,7 @@ class SaveTrainingDataset(io.ComfyNode):
torch.save(shard_data, f)
logging.info(
f"Saved shard {shard_idx + 1}/{num_shards}: {shard_filename} ({end_idx - start_idx} samples)"
"Saved shard %d/%d: %s (%d samples)", shard_idx + 1, num_shards, shard_filename, end_idx - start_idx
)
# Save metadata
@ -1477,7 +1477,7 @@ class LoadTrainingDataset(io.ComfyNode):
logging.info("Loaded %s: %s samples", shard_file, len(shard_data['latents']))
logging.info(
f"Successfully loaded {len(all_latents)} samples from {dataset_dir}."
"Successfully loaded %d samples from %s.", len(all_latents), dataset_dir
)
return io.NodeOutput(all_latents, all_conditioning)

View File

@ -300,7 +300,7 @@ class EasyCacheHolder:
return True
if metadata == self.state_metadata:
return True
logging.warn(f"{self.name} - Tensor shape, dtype or device changed, resetting state")
logging.warning("%s - Tensor shape, dtype or device changed, resetting state", self.name)
self.reset()
return False
@ -435,7 +435,7 @@ class LazyCacheHolder:
return True
if metadata == self.state_metadata:
return True
logging.warn(f"{self.name} - Tensor shape, dtype or device changed, resetting state")
logging.warning("%s - Tensor shape, dtype or device changed, resetting state", self.name)
self.reset()
return False

View File

@ -56,7 +56,7 @@ class FreeU(IO.ComfyNode):
try:
hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
except:
logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
logging.warning("Device %s does not support the torch.fft functions used in the FreeU node, switching to CPU.", hsp.device)
on_cpu_devices[hsp.device] = True
hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
else:
@ -110,7 +110,7 @@ class FreeU_V2(IO.ComfyNode):
try:
hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
except:
logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
logging.warning("Device %s does not support the torch.fft functions used in the FreeU node, switching to CPU.", hsp.device)
on_cpu_devices[hsp.device] = True
hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
else:

View File

@ -27,7 +27,7 @@ def load_hypernetwork_patch(path, strength):
}
if activation_func not in valid_activation:
logging.error("Unsupported Hypernetwork format, if you report it I might implement it. {} {} {} {} {} {}".format(path, activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout))
logging.error("Unsupported Hypernetwork format, if you report it I might implement it. %s %s %s %s %s %s", path, activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout)
return None
out = {}

View File

@ -65,7 +65,7 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora
output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu()
output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu()
except:
logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k))
logging.warning("Could not generate lora weights for key %s, is the weight difference a zero?", k)
elif lora_type == LORAType.FULL_DIFF:
output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu()

View File

@ -83,7 +83,7 @@ class IsChangedCache:
is_changed = await resolve_map_node_over_list_results(is_changed)
node["is_changed"] = [None if isinstance(x, ExecutionBlocker) else x for x in is_changed]
except Exception as e:
logging.warning("WARNING: {}".format(e))
logging.warning("WARNING: %s", e)
node["is_changed"] = float("NaN")
finally:
self.is_changed[node_id] = node["is_changed"]
@ -601,13 +601,13 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
if isinstance(ex, comfy.model_management.OOM_EXCEPTION):
tips = "This error means you ran out of memory on your GPU.\n\nTIPS: If the workflow worked before you might have accidentally set the batch_size to a large number."
logging.info("Memory summary: {}".format(comfy.model_management.debug_memory_summary()))
logging.info("Memory summary: %s", comfy.model_management.debug_memory_summary())
logging.error("Got an OOM, unloading all loaded models.")
comfy.model_management.unload_all_models()
error_details = {
"node_id": real_node_id,
"exception_message": "{}\n{}".format(ex, tips),
"exception_message": "%s\n%s" % (ex, tips),
"exception_type": exception_type,
"traceback": traceback.format_tb(tb),
"current_inputs": input_data_formatted

View File

@ -316,7 +316,7 @@ def recursive_search(directory: str, excluded_dir_names: list[str] | None=None)
except FileNotFoundError:
logging.warning("Warning: Unable to access %s. Skipping this path.", directory)
logging.debug("recursive file list on directory {}".format(directory))
logging.debug("recursive file list on directory %s", directory)
dirpath: str
subdirs: list[str]
filenames: list[str]
@ -338,7 +338,7 @@ def recursive_search(directory: str, excluded_dir_names: list[str] | None=None)
except FileNotFoundError:
logging.warning("Warning: Unable to access %s. Skipping this path.", path)
continue
logging.debug("found {} files".format(len(result)))
logging.debug("found %d files", len(result))
return result, dirs
def filter_files_extensions(files: Collection[str], extensions: Collection[str]) -> list[str]:
@ -361,7 +361,7 @@ def get_full_path(folder_name: str, filename: str) -> str | None:
if os.path.isfile(full_path):
return full_path
elif os.path.islink(full_path):
logging.warning("WARNING path {} exists but doesn't link anywhere, skipping.".format(full_path))
logging.warning("WARNING path %s exists but doesn't link anywhere, skipping.", full_path)
return None

View File

@ -102,7 +102,7 @@ def get_previewer(device, latent_format):
taesd = TAESD(None, taesd_decoder_path, latent_channels=latent_format.latent_channels).to(device)
previewer = TAESDPreviewerImpl(taesd)
else:
logging.warning("Warning: TAESD previews enabled, but could not find models/vae_approx/{}".format(latent_format.taesd_decoder_name))
logging.warning("Warning: TAESD previews enabled, but could not find models/vae_approx/%s", latent_format.taesd_decoder_name)
if previewer is None:
if latent_format.latent_rgb_factors is not None:

main.py
View File

@ -41,12 +41,11 @@ if __name__ == "__main__":
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device)
os.environ["ASCEND_RT_VISIBLE_DEVICES"] = str(args.cuda_device)
logging.info("Set cuda device to: {}".format(args.cuda_device))
logging.info("Set cuda device to: %s", args.cuda_device)
if args.oneapi_device_selector is not None:
os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector
logging.info("Set oneapi device selector to: {}".format(args.oneapi_device_selector))
logging.info("Set oneapi device selector to: %s", args.oneapi_device_selector)
if args.deterministic:
if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
@ -157,7 +156,7 @@ def execute_prestartup_script():
import_message = ""
else:
import_message = " (PRESTARTUP FAILED)"
logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
logging.info("%6.1f seconds%s: %s", n[0], import_message, n[1])
logging.info("")
apply_custom_paths()
@ -253,7 +252,7 @@ def prompt_worker(q, server_instance):
execution_time = time.strftime("%H:%M:%S", time.gmtime(execution_time))
logging.info("Prompt executed in %s", execution_time)
else:
logging.info("Prompt executed in {:.2f} seconds".format(execution_time))
logging.info("Prompt executed in %.2f seconds", execution_time)
flags = q.get_flags()
free_memory = flags.get("free_memory", False)
@ -399,8 +398,8 @@ def start_comfyui(asyncio_loop=None):
if __name__ == "__main__":
# Running directly, just start ComfyUI.
logging.info("Python version: {}".format(sys.version))
logging.info("ComfyUI version: {}".format(comfyui_version.__version__))
logging.info("Python version: %s", sys.version)
logging.info("ComfyUI version: %s", comfyui_version.__version__)
if sys.version_info.major == 3 and sys.version_info.minor < 10:
logging.warning("WARNING: You are using a python version older than 3.10, please upgrade to a newer one. 3.12 and above is recommended.")

View File

@ -2142,7 +2142,7 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom
sys_module_name = module_path.replace(".", "_x_")
try:
logging.debug("Trying to load custom node {}".format(module_path))
logging.debug("Trying to load custom node %s", module_path)
if os.path.isfile(module_path):
module_spec = importlib.util.spec_from_file_location(sys_module_name, module_path)
module_dir = os.path.split(module_path)[0]
@ -2171,7 +2171,7 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom
EXTENSION_WEB_DIRS[project_name] = web_dir_path
logging.info("Automatically register web folder {} for {}".format(web_dir_name, project_name))
logging.info("Automatically register web folder %s for %s", web_dir_name, project_name)
except Exception as e:
logging.warning("Unable to parse pyproject.toml due to lack dependency pydantic-settings, please run 'pip install -r requirements.txt': %s", e)
@ -2271,7 +2271,7 @@ async def init_external_custom_nodes():
import_message = ""
else:
import_message = " (IMPORT FAILED)"
logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
logging.info("%6.1f seconds%s: %s", n[0], import_message, n[1])
logging.info("")
async def init_builtin_extra_nodes():
@ -2440,7 +2440,7 @@ async def init_extra_nodes(init_custom_nodes=True, init_api_nodes=True):
if len(import_failed_api) > 0:
logging.warning("WARNING: some comfy_api_nodes/ nodes did not import correctly. This may be because they are missing some dependencies.\n")
for node in import_failed_api:
logging.warning("IMPORT FAILED: {}".format(node))
logging.warning("IMPORT FAILED: %s", node)
logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.")
if args.windows_standalone_build:
logging.warning("Please run the update script: update/update_comfyui.bat")
@ -2451,7 +2451,8 @@ async def init_extra_nodes(init_custom_nodes=True, init_api_nodes=True):
if len(import_failed) > 0:
logging.warning("WARNING: some comfy_extras/ nodes did not import correctly. This may be because they are missing some dependencies.\n")
for node in import_failed:
logging.warning("IMPORT FAILED: {}".format(node))
logging.warning("IMPORT FAILED: %s", node)
# logging.warning("IMPORT FAILED: {}".format(node))
logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.")
if args.windows_standalone_build:
logging.warning("Please run the update script: update/update_comfyui.bat")

server.py

File diff suppressed because it is too large

View File

@ -30,5 +30,5 @@ def load_extra_path_config(yaml_path):
elif not os.path.isabs(full_path):
full_path = os.path.abspath(os.path.join(yaml_dir, y))
normalized_path = os.path.normpath(full_path)
logging.info("Adding extra search path {} {}".format(x, normalized_path))
logging.info("Adding extra search path %s: %s", x, normalized_path)
folder_paths.add_model_folder_path(x, normalized_path, is_default)