Souyama 2026-01-07 19:39:52 +00:00 committed by GitHub
commit 505c6d10d8
64 changed files with 663 additions and 464 deletions

View File

@ -22,7 +22,7 @@ class AppSettings():
with open(file) as f: with open(file) as f:
return json.load(f) return json.load(f)
except: except:
logging.error(f"The user settings file is corrupted: {file}") logging.error("The user settings file is corrupted: %s", file)
return {} return {}
else: else:
return {} return {}
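
The hunks in this commit all apply the same transformation, so a short standalone sketch of the pattern may help: with %-style arguments, logging stores the template and the values on the LogRecord and only interpolates them when a handler actually emits the record, whereas an f-string is always built up front. The file name below is a made-up placeholder, not repository code.

import logging

logging.basicConfig(level=logging.INFO)
file = "user_settings.json"  # hypothetical value, for illustration only

# Eager: the f-string is formatted even when the logger or level is disabled.
logging.error(f"The user settings file is corrupted: {file}")

# Lazy: interpolation (msg % args) is deferred until the record is emitted,
# and the constant template plays nicer with log aggregation and filtering.
logging.error("The user settings file is corrupted: %s", file)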

View File

@ -27,7 +27,7 @@ def safe_load_json_file(file_path: str) -> dict:
with open(file_path, "r", encoding="utf-8") as f: with open(file_path, "r", encoding="utf-8") as f:
return json.load(f) return json.load(f)
except json.JSONDecodeError: except json.JSONDecodeError:
logging.error(f"Error loading {file_path}") logging.error("Error loading %s", file_path)
return {} return {}

View File

@ -67,7 +67,7 @@ def get_db_path():
def init_db(): def init_db():
db_url = args.database_url db_url = args.database_url
logging.debug(f"Database URL: {db_url}") logging.debug("Database URL: %s", db_url)
db_path = get_db_path() db_path = get_db_path()
db_exists = os.path.exists(db_path) db_exists = os.path.exists(db_path)
@ -95,7 +95,7 @@ def init_db():
try: try:
command.upgrade(config, target_rev) command.upgrade(config, target_rev)
logging.info(f"Database upgraded from {current_rev} to {target_rev}") logging.info("Database upgraded from %s to %s", current_rev, target_rev)
except Exception as e: except Exception as e:
if backup_path: if backup_path:
# Restore the database from backup if upgrade fails # Restore the database from backup if upgrade fails

View File

@ -53,7 +53,7 @@ def get_required_frontend_version():
if line.startswith("comfyui-frontend-package=="): if line.startswith("comfyui-frontend-package=="):
version_str = line.split("==")[-1] version_str = line.split("==")[-1]
if not is_valid_version(version_str): if not is_valid_version(version_str):
logging.error(f"Invalid version format in requirements.txt: {version_str}") logging.error("Invalid version format in requirements.txt: %s", version_str)
return None return None
return version_str return version_str
logging.error("comfyui-frontend-package not found in requirements.txt") logging.error("comfyui-frontend-package not found in requirements.txt")
@ -62,7 +62,7 @@ def get_required_frontend_version():
logging.error("requirements.txt not found. Cannot determine required frontend version.") logging.error("requirements.txt not found. Cannot determine required frontend version.")
return None return None
except Exception as e: except Exception as e:
logging.error(f"Error reading requirements.txt: {e}") logging.error("Error reading requirements.txt: %s", e)
return None return None
@ -87,9 +87,9 @@ ________________________________________________________________________
""".strip() """.strip()
) )
else: else:
logging.info("ComfyUI frontend version: {}".format(frontend_version_str)) logging.info("ComfyUI frontend version: %s", frontend_version_str)
except Exception as e: except Exception as e:
logging.error(f"Failed to check frontend version: {e}") logging.error("Failed to check frontend version: %s", e)
REQUEST_TIMEOUT = 10 # seconds REQUEST_TIMEOUT = 10 # seconds
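
A side note on this hunk, not part of the commit: when the only payload is a caught exception, the standard library also offers logging.exception, which records the message plus the full traceback. A minimal standalone sketch under made-up conditions:

import logging

try:
    raise OSError("requirements.txt not found")  # stand-in failure for illustration
except Exception as e:
    # What the commit uses: lazy interpolation of str(e) into the message.
    logging.error("Failed to check frontend version: %s", e)
    # Alternative: same message, with the traceback appended automatically.
    logging.exception("Failed to check frontend version")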
@ -225,7 +225,7 @@ class FrontendManager:
if line.startswith("comfyui-workflow-templates=="): if line.startswith("comfyui-workflow-templates=="):
version_str = line.split("==")[-1] version_str = line.split("==")[-1]
if not is_valid_version(version_str): if not is_valid_version(version_str):
logging.error(f"Invalid templates version format in requirements.txt: {version_str}") logging.error("Invalid templates version format in requirements.txt: %s", version_str)
return None return None
return version_str return version_str
logging.error("comfyui-workflow-templates not found in requirements.txt") logging.error("comfyui-workflow-templates not found in requirements.txt")
@ -234,7 +234,7 @@ class FrontendManager:
logging.error("requirements.txt not found. Cannot determine required templates version.") logging.error("requirements.txt not found. Cannot determine required templates version.")
return None return None
except Exception as e: except Exception as e:
logging.error(f"Error reading requirements.txt: {e}") logging.error("Error reading requirements.txt: %s", e)
return None return None
@classmethod @classmethod
@ -282,7 +282,7 @@ comfyui-workflow-templates is not installed.
try: try:
template_entries = list(iter_templates()) template_entries = list(iter_templates())
except Exception as exc: except Exception as exc:
logging.error(f"Failed to enumerate workflow templates: {exc}") logging.error("Failed to enumerate workflow templates: %s", exc)
return None return None
asset_map: Dict[str, str] = {} asset_map: Dict[str, str] = {}
@ -293,7 +293,7 @@ comfyui-workflow-templates is not installed.
entry.template_id, asset.filename entry.template_id, asset.filename
) )
except Exception as exc: except Exception as exc:
logging.error(f"Failed to resolve template asset paths: {exc}") logging.error("Failed to resolve template asset paths: %s", exc)
return None return None
if not asset_map: if not asset_map:
@ -390,12 +390,12 @@ comfyui-workflow-templates is not installed.
) )
if os.path.exists(expected_path): if os.path.exists(expected_path):
logging.info( logging.info(
f"Using existing copy of specific frontend version tag: {repo_owner}/{repo_name}@{version}" "Using existing copy of specific frontend version tag: %s/%s@%s", repo_owner, repo_name, version
) )
return expected_path return expected_path
logging.info( logging.info(
f"Initializing frontend: {repo_owner}/{repo_name}@{version}, requesting version details from GitHub..." "Initializing frontend: %s/%s@%s, requesting version details from GitHub...", repo_owner, repo_name, version
) )
provider = provider or FrontEndProvider(repo_owner, repo_name) provider = provider or FrontEndProvider(repo_owner, repo_name)

View File

@ -144,7 +144,7 @@ class ModelFileManager:
result.append(file_info) result.append(file_info)
except Exception as e: except Exception as e:
logging.warning(f"Warning: Unable to access {file_name}. Error: {e}. Skipping this file.") logging.warning("Warning: Unable to access %s. Error: %s. Skipping this file.", file_name, e)
continue continue
for d in subdirs: for d in subdirs:
@ -152,7 +152,7 @@ class ModelFileManager:
try: try:
dirs[path] = os.path.getmtime(path) dirs[path] = os.path.getmtime(path)
except FileNotFoundError: except FileNotFoundError:
logging.warning(f"Warning: Unable to access {path}. Skipping this path.") logging.warning("Warning: Unable to access %s. Skipping this path.", path)
continue continue
return result, dirs, time.perf_counter() return result, dirs, time.perf_counter()

View File

@ -241,7 +241,7 @@ class UserManager():
try: try:
requested_rel_path = parse.unquote(requested_rel_path) requested_rel_path = parse.unquote(requested_rel_path)
except Exception as e: except Exception as e:
logging.warning(f"Failed to decode path parameter: {requested_rel_path}, Error: {e}") logging.warning("Failed to decode path parameter: %s, Error: %s", requested_rel_path, e)
return web.Response(status=400, text="Invalid characters in path parameter") return web.Response(status=400, text="Invalid characters in path parameter")
@ -256,7 +256,7 @@ class UserManager():
except KeyError as e: except KeyError as e:
# Invalid user detected by get_request_user_id inside get_request_user_filepath # Invalid user detected by get_request_user_id inside get_request_user_filepath
logging.warning(f"Access denied for user: {e}") logging.warning("Access denied for user: %s", e)
return web.Response(status=403, text="Invalid user specified in request") return web.Response(status=403, text="Invalid user specified in request")
@ -304,11 +304,11 @@ class UserManager():
entry_info["size"] = stats.st_size entry_info["size"] = stats.st_size
entry_info["modified"] = stats.st_mtime entry_info["modified"] = stats.st_mtime
except OSError as stat_error: except OSError as stat_error:
logging.warning(f"Could not stat file {file_path}: {stat_error}") logging.warning("Could not stat file %s: %s", file_path, stat_error)
pass # Include file with available info pass # Include file with available info
results.append(entry_info) results.append(entry_info)
except OSError as e: except OSError as e:
logging.error(f"Error listing directory {target_abs_path}: {e}") logging.error("Error listing directory %s: %s", target_abs_path, e)
return web.Response(status=500, text="Error reading directory contents") return web.Response(status=500, text="Error reading directory contents")
# Sort results alphabetically, directories first then files # Sort results alphabetically, directories first then files
@ -380,7 +380,7 @@ class UserManager():
with open(path, "wb") as f: with open(path, "wb") as f:
f.write(body) f.write(body)
except OSError as e: except OSError as e:
logging.warning(f"Error saving file '{path}': {e}") logging.warning("Error saving file '%s': %s", path, e)
return web.Response( return web.Response(
status=400, status=400,
reason="Invalid filename. Please avoid special characters like :\\/*?\"<>|" reason="Invalid filename. Please avoid special characters like :\\/*?\"<>|"
@ -444,7 +444,7 @@ class UserManager():
if not overwrite and os.path.exists(dest): if not overwrite and os.path.exists(dest):
return web.Response(status=409, text="File already exists") return web.Response(status=409, text="File already exists")
logging.info(f"moving '{source}' -> '{dest}'") logging.info("moving '%s' -> '%s'", source, dest)
shutil.move(source, dest) shutil.move(source, dest)
user_path = self.get_request_user_filepath(request, None) user_path = self.get_request_user_filepath(request, None)

View File

@ -84,8 +84,8 @@ def load_audio_encoder_from_sd(sd, prefix=""):
audio_encoder = AudioEncoderModel(config) audio_encoder = AudioEncoderModel(config)
m, u = audio_encoder.load_sd(sd) m, u = audio_encoder.load_sd(sd)
if len(m) > 0: if len(m) > 0:
logging.warning("missing audio encoder: {}".format(m)) logging.warning("missing audio encoder: %s", m)
if len(u) > 0: if len(u) > 0:
logging.warning("unexpected audio encoder: {}".format(u)) logging.warning("unexpected audio encoder: %s", u)
return audio_encoder return audio_encoder

View File

@ -130,7 +130,7 @@ def load_clipvision_from_sd(sd, prefix="", convert_keys=False):
clip = ClipVisionModel(json_config) clip = ClipVisionModel(json_config)
m, u = clip.load_sd(sd) m, u = clip.load_sd(sd)
if len(m) > 0: if len(m) > 0:
logging.warning("missing clip vision: {}".format(m)) logging.warning("missing clip vision: %s", m)
u = set(u) u = set(u)
keys = list(sd.keys()) keys = list(sd.keys())
for k in keys: for k in keys:

View File

@ -124,9 +124,9 @@ class IndexListContextHandler(ContextHandlerABC):
def should_use_context(self, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]) -> bool: def should_use_context(self, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]) -> bool:
# for now, assume first dim is batch - should have stored on BaseModel in actual implementation # for now, assume first dim is batch - should have stored on BaseModel in actual implementation
if x_in.size(self.dim) > self.context_length: if x_in.size(self.dim) > self.context_length:
logging.info(f"Using context windows {self.context_length} with overlap {self.context_overlap} for {x_in.size(self.dim)} frames.") logging.info("Using context windows %d with overlap %d for %d frames.", self.context_length, self.context_overlap, x_in.size(self.dim))
if self.cond_retain_index_list: if self.cond_retain_index_list:
logging.info(f"Retaining original cond for indexes: {self.cond_retain_index_list}") logging.info("Retaining original cond for indexes: %s", self.cond_retain_index_list)
return True return True
return False return False
@ -143,7 +143,7 @@ class IndexListContextHandler(ContextHandlerABC):
# if multiple conds, split based on primary region # if multiple conds, split based on primary region
if self.split_conds_to_windows and len(cond_in) > 1: if self.split_conds_to_windows and len(cond_in) > 1:
region = window.get_region_index(len(cond_in)) region = window.get_region_index(len(cond_in))
logging.info(f"Splitting conds to windows; using region {region} for window {window.index_list[0]}-{window.index_list[-1]} with center ratio {window.center_ratio:.3f}") logging.info("Splitting conds to windows; using region %d for window %d-%d with center ratio %.3f", region, window.index_list[0], window.index_list[-1], window.center_ratio)
cond_in = [cond_in[region]] cond_in = [cond_in[region]]
# cond object is a list containing a dict - outer list is irrelevant, so just loop through it # cond object is a list containing a dict - outer list is irrelevant, so just loop through it
for actual_cond in cond_in: for actual_cond in cond_in:

View File

@ -442,10 +442,10 @@ def controlnet_load_state_dict(control_model, sd):
missing, unexpected = control_model.load_state_dict(sd, strict=False) missing, unexpected = control_model.load_state_dict(sd, strict=False)
if len(missing) > 0: if len(missing) > 0:
logging.warning("missing controlnet keys: {}".format(missing)) logging.warning("missing controlnet keys: %s", missing)
if len(unexpected) > 0: if len(unexpected) > 0:
logging.debug("unexpected controlnet keys: {}".format(unexpected)) logging.debug("unexpected controlnet keys: %s", unexpected)
return control_model return control_model
@ -668,7 +668,7 @@ def load_controlnet_state_dict(state_dict, model=None, model_options={}):
leftover_keys = controlnet_data.keys() leftover_keys = controlnet_data.keys()
if len(leftover_keys) > 0: if len(leftover_keys) > 0:
logging.warning("leftover keys: {}".format(leftover_keys)) logging.warning("leftover keys: %s", leftover_keys)
controlnet_data = new_sd controlnet_data = new_sd
elif "controlnet_blocks.0.weight" in controlnet_data: elif "controlnet_blocks.0.weight" in controlnet_data:
if "double_blocks.0.img_attn.norm.key_norm.scale" in controlnet_data: if "double_blocks.0.img_attn.norm.key_norm.scale" in controlnet_data:
@ -753,10 +753,10 @@ def load_controlnet_state_dict(state_dict, model=None, model_options={}):
missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False) missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
if len(missing) > 0: if len(missing) > 0:
logging.warning("missing controlnet keys: {}".format(missing)) logging.warning("missing controlnet keys: %s", missing)
if len(unexpected) > 0: if len(unexpected) > 0:
logging.debug("unexpected controlnet keys: {}".format(unexpected)) logging.debug("unexpected controlnet keys: %s", unexpected)
global_average_pooling = model_options.get("global_average_pooling", False) global_average_pooling = model_options.get("global_average_pooling", False)
control = ControlNet(control_model, global_average_pooling=global_average_pooling, load_device=load_device, manual_cast_dtype=manual_cast_dtype) control = ControlNet(control_model, global_average_pooling=global_average_pooling, load_device=load_device, manual_cast_dtype=manual_cast_dtype)
@ -771,7 +771,7 @@ def load_controlnet(ckpt_path, model=None, model_options={}):
cnet = load_controlnet_state_dict(comfy.utils.load_torch_file(ckpt_path, safe_load=True), model=model, model_options=model_options) cnet = load_controlnet_state_dict(comfy.utils.load_torch_file(ckpt_path, safe_load=True), model=model, model_options=model_options)
if cnet is None: if cnet is None:
logging.error("error checkpoint does not contain controlnet or t2i adapter data {}".format(ckpt_path)) logging.error("error checkpoint does not contain controlnet or t2i adapter data %s", ckpt_path)
return cnet return cnet
class T2IAdapter(ControlBase): class T2IAdapter(ControlBase):
@ -876,9 +876,9 @@ def load_t2i_adapter(t2i_data, model_options={}): #TODO: model_options
missing, unexpected = model_ad.load_state_dict(t2i_data) missing, unexpected = model_ad.load_state_dict(t2i_data)
if len(missing) > 0: if len(missing) > 0:
logging.warning("t2i missing {}".format(missing)) logging.warning("t2i missing", missing)
if len(unexpected) > 0: if len(unexpected) > 0:
logging.debug("t2i unexpected {}".format(unexpected)) logging.debug("t2i unexpected", unexpected)
return T2IAdapter(model_ad, model_ad.input_channels, compression_ratio, upscale_algorithm) return T2IAdapter(model_ad, model_ad.input_channels, compression_ratio, upscale_algorithm)
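
One caveat with the lazy form used throughout this commit: every extra argument needs a matching placeholder. A standalone sketch (the key list is invented) of what happens when the placeholder is missing:

import logging

missing = ["adapter.body.0.weight"]  # hypothetical key list, for illustration only

# Correct lazy form: one %s per extra argument.
logging.warning("t2i missing %s", missing)

# With no placeholder, msg % args fails when the record is emitted; logging
# reports "--- Logging error ---" on stderr and the intended message is lost.
logging.warning("t2i missing", missing)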

View File

@ -86,7 +86,7 @@ def convert_vae_state_dict(vae_state_dict):
for k, v in new_state_dict.items(): for k, v in new_state_dict.items():
for weight_name in weights_to_convert: for weight_name in weights_to_convert:
if f"mid.attn_1.{weight_name}.weight" in k: if f"mid.attn_1.{weight_name}.weight" in k:
logging.debug(f"Reshaping {k} for SD format") logging.debug("Reshaping %s for SD format", k)
new_state_dict[k] = reshape_weight_for_sd(v, conv3d=conv3d) new_state_dict[k] = reshape_weight_for_sd(v, conv3d=conv3d)
return new_state_dict return new_state_dict

View File

@ -475,7 +475,7 @@ class UniPC:
return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs) return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True): def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
logging.info(f'using unified predictor-corrector with order {order} (solver type: vary coeff)') logging.info("using unified predictor-corrector with order %s (solver type: vary coeff)", order)
ns = self.noise_schedule ns = self.noise_schedule
assert order <= len(model_prev_list) assert order <= len(model_prev_list)

View File

@ -666,7 +666,7 @@ def load_hook_lora_for_models(model: ModelPatcher, clip: CLIP, lora: dict[str, t
k1 = set(k1) k1 = set(k1)
for x in loaded: for x in loaded:
if (x not in k) and (x not in k1): if (x not in k) and (x not in k1):
logging.warning(f"NOT LOADED {x}") logging.warning("NOT LOADED %s", x)
return (new_modelpatcher, new_clip, hook_group) return (new_modelpatcher, new_clip, hook_group)
def _combine_hooks_from_values(c_dict: dict[str, HookGroup], values: dict[str, HookGroup], cache: dict[tuple[HookGroup, HookGroup], HookGroup]): def _combine_hooks_from_values(c_dict: dict[str, HookGroup], values: dict[str, HookGroup], cache: dict[tuple[HookGroup, HookGroup], HookGroup]):

View File

@ -295,7 +295,7 @@ class TimestepEmbedding(nn.Module):
def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False, weight_args={}, operations=None): def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False, weight_args={}, operations=None):
super().__init__() super().__init__()
logging.debug( logging.debug(
f"Using AdaLN LoRA Flag: {use_adaln_lora}. We enable bias if no AdaLN LoRA for backward compatibility." "Using AdaLN LoRA Flag: %s. We enable bias if no AdaLN LoRA for backward compatibility.", use_adaln_lora
) )
self.linear_1 = operations.Linear(in_features, out_features, bias=not use_adaln_lora, **weight_args) self.linear_1 = operations.Linear(in_features, out_features, bias=not use_adaln_lora, **weight_args)
self.activation = nn.SiLU() self.activation = nn.SiLU()

View File

@ -632,9 +632,7 @@ class DecoderBase(nn.Module):
curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1) curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1)
self.z_shape = (1, z_channels, curr_res, curr_res) self.z_shape = (1, z_channels, curr_res, curr_res)
logging.debug(
-"Working with z of shape {} = {} dimensions.".format(
-self.z_shape, np.prod(self.z_shape)
-)
+"Working with z of shape %s = %d dimensions.", self.z_shape, np.prod(self.z_shape)
)
# z to block_in # z to block_in
@ -929,9 +927,7 @@ class DecoderFactorized(nn.Module):
curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1) curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1)
self.z_shape = (1, z_channels, curr_res, curr_res) self.z_shape = (1, z_channels, curr_res, curr_res)
logging.debug(
-"Working with z of shape {} = {} dimensions.".format(
-self.z_shape, np.prod(self.z_shape)
-)
+"Working with z of shape %s = %d dimensions.", self.z_shape, np.prod(self.z_shape)
)
# z to block_in # z to block_in

View File

@ -216,7 +216,7 @@ class GeneralDIT(nn.Module):
else: else:
raise ValueError(f"Unknown pos_emb_cls {self.pos_emb_cls}") raise ValueError(f"Unknown pos_emb_cls {self.pos_emb_cls}")
logging.debug(f"Building positional embedding with {self.pos_emb_cls} class, impl {cls_type}") logging.debug("Building positional embedding with %s class, impl %s", self.pos_emb_cls, cls_type)
kwargs = dict( kwargs = dict(
model_channels=self.model_channels, model_channels=self.model_channels,
len_h=self.max_img_h // self.patch_spatial, len_h=self.max_img_h // self.patch_spatial,

View File

@ -118,13 +118,20 @@ class Attention(nn.Module):
operations=None, operations=None,
) -> None: ) -> None:
super().__init__() super().__init__()
+context_dim = query_dim if context_dim is None else context_dim
logging.debug(
-f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
-f"{n_heads} heads with a dimension of {head_dim}."
+"Setting up %s. Query dim is %d, context_dim is %d and using "
+"%d heads with a dimension of %d.",
+self.__class__.__name__,
+query_dim,
+context_dim,
+n_heads,
+head_dim,
)
self.is_selfattn = context_dim is None # self attention
-context_dim = query_dim if context_dim is None else context_dim
inner_dim = head_dim * n_heads inner_dim = head_dim * n_heads
self.n_heads = n_heads self.n_heads = n_heads
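
Another detail relevant to the hunk above: %d only accepts numbers, which is presumably why the None default for context_dim is now resolved before the debug call rather than after it. A small standalone sketch with invented values:

import logging

logging.basicConfig(level=logging.DEBUG)
context_dim = None  # e.g. a self-attention layer before the default is applied

# %s renders any object, including None.
logging.debug("context_dim is %s", context_dim)

# %d requires a number; with None the interpolation fails at emit time and the
# record is routed to the logging error handler instead of being printed.
logging.debug("context_dim is %d", context_dim)

context_dim = 512 if context_dim is None else context_dim  # hypothetical query_dim of 512
logging.debug("context_dim is %d", context_dim)  # fine once resolved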
@ -226,7 +233,7 @@ class TimestepEmbedding(nn.Module):
def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False, device=None, dtype=None, operations=None): def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False, device=None, dtype=None, operations=None):
super().__init__() super().__init__()
logging.debug( logging.debug(
f"Using AdaLN LoRA Flag: {use_adaln_lora}. We enable bias if no AdaLN LoRA for backward compatibility." "Using AdaLN LoRA Flag: %s. We enable bias if no AdaLN LoRA for backward compatibility.", use_adaln_lora
) )
self.in_dim = in_features self.in_dim = in_features
self.out_dim = out_features self.out_dim = out_features
@ -718,7 +725,7 @@ class MiniTrainDIT(nn.Module):
else: else:
raise ValueError(f"Unknown pos_emb_cls {self.pos_emb_cls}") raise ValueError(f"Unknown pos_emb_cls {self.pos_emb_cls}")
logging.debug(f"Building positional embedding with {self.pos_emb_cls} class, impl {cls_type}") logging.debug("Building positional embedding with %s class, impl %s", self.pos_emb_cls, cls_type)
kwargs = dict( kwargs = dict(
model_channels=self.model_channels, model_channels=self.model_channels,
len_h=self.max_img_h // self.patch_spatial, len_h=self.max_img_h // self.patch_spatial,

View File

@ -90,9 +90,9 @@ class CausalContinuousVideoTokenizer(nn.Module):
self.distribution = IdentityDistribution() # ContinuousFormulation[formulation_name].value() self.distribution = IdentityDistribution() # ContinuousFormulation[formulation_name].value()
num_parameters = sum(param.numel() for param in self.parameters()) num_parameters = sum(param.numel() for param in self.parameters())
logging.debug(f"model={self.name}, num_parameters={num_parameters:,}") logging.debug("model=%s, num_parameters=%d", self.name, num_parameters)
logging.debug( logging.debug(
f"z_channels={z_channels}, latent_channels={self.latent_channels}." "z_channels=%d, latent_channels=%d.", z_channels, self.latent_channels
) )
latent_temporal_chunk = 16 latent_temporal_chunk = 16

View File

@ -401,9 +401,9 @@ def make_attn(in_channels, attn_type="vanilla", norm_type="group"):
attn_type = AttentionType.str_to_enum(attn_type) attn_type = AttentionType.str_to_enum(attn_type)
if attn_type != AttentionType.NONE: if attn_type != AttentionType.NONE:
logging.info(f"making attention of type '{attn_type.value}' with {in_channels} in_channels") logging.info("making attention of type '%s' with %s in_channels", attn_type.value, in_channels)
else: else:
logging.info(f"making identity attention with {in_channels} in_channels") logging.info("making identity attention with %s in_channels", in_channels)
match attn_type: match attn_type:
case AttentionType.VANILLA: case AttentionType.VANILLA:

View File

@ -58,7 +58,7 @@ class AbstractAutoencoder(torch.nn.Module):
if self.use_ema: if self.use_ema:
self.model_ema = LitEma(self, decay=ema_decay) self.model_ema = LitEma(self, decay=ema_decay)
logging.info(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") logging.info("Keeping EMAs of %s.", len(list(self.model_ema.buffers())))
def get_input(self, batch) -> Any: def get_input(self, batch) -> Any:
raise NotImplementedError() raise NotImplementedError()
@ -74,14 +74,14 @@ class AbstractAutoencoder(torch.nn.Module):
self.model_ema.store(self.parameters()) self.model_ema.store(self.parameters())
self.model_ema.copy_to(self) self.model_ema.copy_to(self)
if context is not None: if context is not None:
logging.info(f"{context}: Switched to EMA weights") logging.info("%s: Switched to EMA weights", context)
try: try:
yield None yield None
finally: finally:
if self.use_ema: if self.use_ema:
self.model_ema.restore(self.parameters()) self.model_ema.restore(self.parameters())
if context is not None: if context is not None:
logging.info(f"{context}: Restored training weights") logging.info("%s: Restored training weights", context)
def encode(self, *args, **kwargs) -> torch.Tensor: def encode(self, *args, **kwargs) -> torch.Tensor:
raise NotImplementedError("encode()-method of abstract base class called") raise NotImplementedError("encode()-method of abstract base class called")
@ -90,7 +90,7 @@ class AbstractAutoencoder(torch.nn.Module):
raise NotImplementedError("decode()-method of abstract base class called") raise NotImplementedError("decode()-method of abstract base class called")
def instantiate_optimizer_from_config(self, params, lr, cfg): def instantiate_optimizer_from_config(self, params, lr, cfg):
logging.info(f"loading >>> {cfg['target']} <<< optimizer from config") logging.info("loading >>> %s <<< optimizer from config", cfg['target'])
return get_obj_from_str(cfg["target"])( return get_obj_from_str(cfg["target"])(
params, lr=lr, **cfg.get("params", dict()) params, lr=lr, **cfg.get("params", dict())
) )

View File

@ -10,7 +10,7 @@ import logging
import functools import functools
from .diffusionmodules.util import AlphaBlender, timestep_embedding from .diffusionmodules.util import AlphaBlender, timestep_embedding
from .sub_quadratic_attention import efficient_dot_product_attention from comfy.ldm.modules.sub_quadratic_attention import efficient_dot_product_attention
from comfy import model_management from comfy import model_management
@ -25,7 +25,11 @@ try:
except ImportError as e: except ImportError as e:
if model_management.sage_attention_enabled(): if model_management.sage_attention_enabled():
if e.name == "sageattention": if e.name == "sageattention":
logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention") logging.error("""
To use the `--use-sage-attention` feature, the `sageattention` package must be installed first.
command:
%s -m pip install sageattention""", sys.executable)
else: else:
raise e raise e
exit(-1) exit(-1)
@ -43,7 +47,11 @@ try:
FLASH_ATTENTION_IS_AVAILABLE = True FLASH_ATTENTION_IS_AVAILABLE = True
except ImportError: except ImportError:
if model_management.flash_attention_enabled(): if model_management.flash_attention_enabled():
logging.error(f"\n\nTo use the `--use-flash-attention` feature, the `flash-attn` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install flash-attn") logging.error("""
To use the `--use-flash-attention` feature, the `flash-attn` package must be installed first.
command:
%s -m pip install flash-attn""", sys.executable)
exit(-1) exit(-1)
REGISTERED_ATTENTION_FUNCTIONS = {} REGISTERED_ATTENTION_FUNCTIONS = {}
@ -52,7 +60,7 @@ def register_attention_function(name: str, func: Callable):
if name not in REGISTERED_ATTENTION_FUNCTIONS: if name not in REGISTERED_ATTENTION_FUNCTIONS:
REGISTERED_ATTENTION_FUNCTIONS[name] = func REGISTERED_ATTENTION_FUNCTIONS[name] = func
else: else:
logging.warning(f"Attention function {name} already registered, skipping registration.") logging.warning("Attention function %s already registered, skipping registration.", name)
def get_attention_function(name: str, default: Any=...) -> Union[Callable, None]: def get_attention_function(name: str, default: Any=...) -> Union[Callable, None]:
if name == "optimized": if name == "optimized":
@ -382,7 +390,7 @@ def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape
steps *= 2 steps *= 2
if steps > 64: if steps > 64:
raise e raise e
logging.warning("out of memory error, increasing steps and trying again {}".format(steps)) logging.warning("out of memory error, increasing steps and trying again", steps)
else: else:
raise e raise e
@ -548,7 +556,7 @@ def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=
try: try:
out = sageattn(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout) out = sageattn(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout)
except Exception as e: except Exception as e:
logging.error("Error running sage attention: {}, using pytorch attention instead.".format(e)) logging.error("Error running sage attention: %s, using pytorch attention instead.", e)
exception_fallback = True exception_fallback = True
if exception_fallback: if exception_fallback:
if tensor_layout == "NHD": if tensor_layout == "NHD":
@ -707,7 +715,7 @@ def attention_flash(q, k, v, heads, mask=None, attn_precision=None, skip_reshape
causal=False, causal=False,
).transpose(1, 2) ).transpose(1, 2)
except Exception as e: except Exception as e:
logging.warning(f"Flash Attention failed, using default SDPA: {e}") logging.warning("Flash Attention failed, using default SDPA: %s", e)
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False) out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
if not skip_output_reshape: if not skip_output_reshape:
out = ( out = (

View File

@ -272,7 +272,7 @@ def slice_attention(q, k, v):
steps *= 2 steps *= 2
if steps > 128: if steps > 128:
raise e raise e
logging.warning("out of memory error, increasing steps and trying again {}".format(steps)) logging.warning("out of memory error, increasing steps and trying again %s", steps)
return r1 return r1
@ -725,8 +725,7 @@ class Decoder(nn.Module):
block_in = ch*ch_mult[self.num_resolutions-1] block_in = ch*ch_mult[self.num_resolutions-1]
curr_res = resolution // 2**(self.num_resolutions-1) curr_res = resolution // 2**(self.num_resolutions-1)
self.z_shape = (1,z_channels,curr_res,curr_res) self.z_shape = (1,z_channels,curr_res,curr_res)
logging.debug("Working with z of shape {} = {} dimensions.".format( logging.debug("Working with z of shape %s = %d dimensions.", self.z_shape, np.prod(self.z_shape))
self.z_shape, np.prod(self.z_shape)))
# z to block_in # z to block_in
self.conv_in = conv_op(z_channels, self.conv_in = conv_op(z_channels,

View File

@ -369,7 +369,7 @@ def apply_control(h, control, name):
try: try:
h += ctrl h += ctrl
except: except:
logging.warning("warning control could not be applied {} {}".format(h.shape, ctrl.shape)) logging.warning("warning control could not be applied %s %s", h.shape, ctrl.shape)
return h return h
class UNetModel(nn.Module): class UNetModel(nn.Module):

View File

@ -131,7 +131,7 @@ def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timestep
# add one to get the final alpha values right (the ones from first scale to data during sampling) # add one to get the final alpha values right (the ones from first scale to data during sampling)
steps_out = ddim_timesteps + 1 steps_out = ddim_timesteps + 1
if verbose: if verbose:
logging.info(f'Selected timesteps for ddim sampler: {steps_out}') logging.info("Selected timesteps for ddim sampler: %s", steps_out)
return steps_out return steps_out
@ -143,9 +143,8 @@ def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
# according the the formula provided in https://arxiv.org/abs/2010.02502 # according the the formula provided in https://arxiv.org/abs/2010.02502
sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
if verbose: if verbose:
logging.info(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') logging.info("Selected alphas for ddim sampler: a_t: %s; a_(t-1): %s", alphas, alphas_prev)
-logging.info(f'For the chosen value of eta, which is {eta}, '
-f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
+logging.info("For the chosen value of eta, which is %s, this results in the following sigma_t schedule for ddim sampler %s", eta, sigmas)
return sigmas, alphas, alphas_prev return sigmas, alphas, alphas_prev

View File

@ -66,7 +66,7 @@ def mean_flat(tensor):
def count_params(model, verbose=False): def count_params(model, verbose=False):
total_params = sum(p.numel() for p in model.parameters()) total_params = sum(p.numel() for p in model.parameters())
if verbose: if verbose:
logging.info(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.") logging.info("%s has %.2f M params.", model.__class__.__name__, total_params * 1e-06)
return total_params return total_params

View File

@ -90,7 +90,7 @@ def load_lora(lora, to_load, log_missing=True):
if log_missing: if log_missing:
for x in lora.keys(): for x in lora.keys():
if x not in loaded_keys: if x not in loaded_keys:
logging.warning("lora key not loaded: {}".format(x)) logging.warning("lora key not loaded: %s", x)
return patch_dict return patch_dict
@ -390,7 +390,7 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, ori
if isinstance(v, weight_adapter.WeightAdapterBase): if isinstance(v, weight_adapter.WeightAdapterBase):
output = v.calculate_weight(weight, key, strength, strength_model, offset, function, intermediate_dtype, original_weights) output = v.calculate_weight(weight, key, strength, strength_model, offset, function, intermediate_dtype, original_weights)
if output is None: if output is None:
logging.warning("Calculate Weight Failed: {} {}".format(v.name, key)) logging.warning("Calculate Weight Failed: %s %s", v.name, key)
else: else:
weight = output weight = output
if old_weight is not None: if old_weight is not None:
@ -408,12 +408,12 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, ori
# An extra flag to pad the weight if the diff's shape is larger than the weight # An extra flag to pad the weight if the diff's shape is larger than the weight
do_pad_weight = len(v) > 1 and v[1]['pad_weight'] do_pad_weight = len(v) > 1 and v[1]['pad_weight']
if do_pad_weight and diff.shape != weight.shape: if do_pad_weight and diff.shape != weight.shape:
logging.info("Pad weight {} from {} to shape: {}".format(key, weight.shape, diff.shape)) logging.info("Pad weight %s from %s to shape: %s", key, weight.shape, diff.shape)
weight = pad_tensor_to_shape(weight, diff.shape) weight = pad_tensor_to_shape(weight, diff.shape)
if strength != 0.0: if strength != 0.0:
if diff.shape != weight.shape: if diff.shape != weight.shape:
logging.warning("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, diff.shape, weight.shape)) logging.warning("WARNING SHAPE MISMATCH %s WEIGHT NOT MERGED %s != %s", key, diff.shape, weight.shape)
else: else:
weight += function(strength * comfy.model_management.cast_to_device(diff, weight.device, weight.dtype)) weight += function(strength * comfy.model_management.cast_to_device(diff, weight.device, weight.dtype))
elif patch_type == "set": elif patch_type == "set":
@ -424,7 +424,7 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, ori
comfy.model_management.cast_to_device(original_weights[key][0][0], weight.device, intermediate_dtype) comfy.model_management.cast_to_device(original_weights[key][0][0], weight.device, intermediate_dtype)
weight += function(strength * comfy.model_management.cast_to_device(diff_weight, weight.device, weight.dtype)) weight += function(strength * comfy.model_management.cast_to_device(diff_weight, weight.device, weight.dtype))
else: else:
logging.warning("patch type not recognized {} {}".format(patch_type, key)) logging.warning("patch type not recognized %s %s", patch_type, key)
if old_weight is not None: if old_weight is not None:
weight = old_weight weight = old_weight

View File

@ -144,7 +144,7 @@ class BaseModel(torch.nn.Module):
if comfy.model_management.force_channels_last(): if comfy.model_management.force_channels_last():
self.diffusion_model.to(memory_format=torch.channels_last) self.diffusion_model.to(memory_format=torch.channels_last)
logging.debug("using channels last mode for diffusion model") logging.debug("using channels last mode for diffusion model")
logging.info("model weight dtype {}, manual cast: {}".format(self.get_dtype(), self.manual_cast_dtype)) logging.info("model weight dtype %s, manual cast: %s", self.get_dtype(), self.manual_cast_dtype)
self.model_type = model_type self.model_type = model_type
self.model_sampling = model_sampling(model_config, model_type) self.model_sampling = model_sampling(model_config, model_type)
@ -153,8 +153,8 @@ class BaseModel(torch.nn.Module):
self.adm_channels = 0 self.adm_channels = 0
self.concat_keys = () self.concat_keys = ()
logging.info("model_type {}".format(model_type.name)) logging.info("model_type %s", model_type.name)
logging.debug("adm {}".format(self.adm_channels)) logging.debug("adm %s", self.adm_channels)
self.memory_usage_factor = model_config.memory_usage_factor self.memory_usage_factor = model_config.memory_usage_factor
self.memory_usage_factor_conds = () self.memory_usage_factor_conds = ()
self.memory_usage_shape_process = {} self.memory_usage_shape_process = {}
@ -308,10 +308,10 @@ class BaseModel(torch.nn.Module):
to_load = self.model_config.process_unet_state_dict(to_load) to_load = self.model_config.process_unet_state_dict(to_load)
m, u = self.diffusion_model.load_state_dict(to_load, strict=False) m, u = self.diffusion_model.load_state_dict(to_load, strict=False)
if len(m) > 0: if len(m) > 0:
logging.warning("unet missing: {}".format(m)) logging.warning("unet missing: %s", m)
if len(u) > 0: if len(u) > 0:
logging.warning("unet unexpected: {}".format(u)) logging.warning("unet unexpected: %s", u)
del to_load del to_load
return self return self

View File

@ -775,7 +775,7 @@ def model_config_from_unet_config(unet_config, state_dict=None):
if model_config.matches(unet_config, state_dict): if model_config.matches(unet_config, state_dict):
return model_config(unet_config) return model_config(unet_config)
logging.error("no match {}".format(unet_config)) logging.error("no match %s", unet_config)
return None return None
def model_config_from_unet(state_dict, unet_key_prefix, use_base_if_no_match=False, metadata=None): def model_config_from_unet(state_dict, unet_key_prefix, use_base_if_no_match=False, metadata=None):

View File

@ -98,7 +98,8 @@ if args.directml is not None:
directml_device = torch_directml.device() directml_device = torch_directml.device()
else: else:
directml_device = torch_directml.device(device_index) directml_device = torch_directml.device(device_index)
logging.info("Using directml with device: {}".format(torch_directml.device_name(device_index))) logging.info("Using directml with device: %s", torch_directml.device_name(device_index))
# torch_directml.disable_tiled_resources(True) # torch_directml.disable_tiled_resources(True)
lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default. lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default.
@ -238,13 +239,13 @@ def mac_version():
total_vram = get_total_memory(get_torch_device()) / (1024 * 1024) total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
total_ram = psutil.virtual_memory().total / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024)
logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram)) logging.info("Total VRAM %0.0f MB, total RAM %0.0f MB", total_vram, total_ram)
try: try:
logging.info("pytorch version: {}".format(torch_version)) logging.info("pytorch version: %s", torch_version)
mac_ver = mac_version() mac_ver = mac_version()
if mac_ver is not None: if mac_ver is not None:
logging.info("Mac Version {}".format(mac_ver)) logging.info("Mac Version %s", mac_ver)
except: except:
pass pass
@ -268,7 +269,7 @@ else:
pass pass
try: try:
XFORMERS_VERSION = xformers.version.__version__ XFORMERS_VERSION = xformers.version.__version__
logging.info("xformers version: {}".format(XFORMERS_VERSION)) logging.info("xformers version: %s", XFORMERS_VERSION)
if XFORMERS_VERSION.startswith("0.0.18"): if XFORMERS_VERSION.startswith("0.0.18"):
logging.warning("\nWARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.") logging.warning("\nWARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.")
logging.warning("Please downgrade or upgrade xformers to a different version.\n") logging.warning("Please downgrade or upgrade xformers to a different version.\n")
@ -349,8 +350,8 @@ try:
except: except:
rocm_version = (6, -1) rocm_version = (6, -1)
logging.info("AMD arch: {}".format(arch)) logging.info("AMD arch: %s", arch)
logging.info("ROCm version: {}".format(rocm_version)) logging.info("ROCm version: %s", rocm_version)
if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
if importlib.util.find_spec('triton') is not None: # AMD efficient attention implementation depends on triton. TODO: better way of detecting if it's compiled in or not. if importlib.util.find_spec('triton') is not None: # AMD efficient attention implementation depends on triton. TODO: better way of detecting if it's compiled in or not.
if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much
@ -415,7 +416,7 @@ if cpu_state != CPUState.GPU:
if cpu_state == CPUState.MPS: if cpu_state == CPUState.MPS:
vram_state = VRAMState.SHARED vram_state = VRAMState.SHARED
logging.info(f"Set vram state to: {vram_state.name}") logging.info("Set vram state to: %s", vram_state.name)
DISABLE_SMART_MEMORY = args.disable_smart_memory DISABLE_SMART_MEMORY = args.disable_smart_memory
@ -444,7 +445,7 @@ def get_torch_device_name(device):
return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device)) return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device))
try: try:
logging.info("Device: {}".format(get_torch_device_name(get_torch_device()))) logging.info("Device: %s", get_torch_device_name(get_torch_device()))
except: except:
logging.warning("Could not pick default device.") logging.warning("Could not pick default device.")
@ -573,7 +574,7 @@ if WINDOWS:
if args.reserve_vram is not None: if args.reserve_vram is not None:
EXTRA_RESERVED_VRAM = args.reserve_vram * 1024 * 1024 * 1024 EXTRA_RESERVED_VRAM = args.reserve_vram * 1024 * 1024 * 1024
logging.debug("Reserving {}MB vram for other applications.".format(EXTRA_RESERVED_VRAM / (1024 * 1024))) logging.debug("Reserving %0.2f MB of VRAM as per user request.", EXTRA_RESERVED_VRAM / (1024 * 1024))
def extra_reserved_memory(): def extra_reserved_memory():
return EXTRA_RESERVED_VRAM return EXTRA_RESERVED_VRAM
@ -602,7 +603,7 @@ def free_memory(memory_required, device, keep_loaded=[]):
if free_mem > memory_required: if free_mem > memory_required:
break break
memory_to_free = memory_required - free_mem memory_to_free = memory_required - free_mem
logging.debug(f"Unloading {current_loaded_models[i].model.model.__class__.__name__}") logging.debug("Unloading %s", current_loaded_models[i].model.model.__class__.__name__)
if current_loaded_models[i].model_unload(memory_to_free): if current_loaded_models[i].model_unload(memory_to_free):
unloaded_model.append(i) unloaded_model.append(i)
@ -652,7 +653,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
models_to_load.append(loaded) models_to_load.append(loaded)
else: else:
if hasattr(x, "model"): if hasattr(x, "model"):
logging.info(f"Requested to load {x.model.__class__.__name__}") logging.info("Requested to load %s", x.model.__class__.__name__)
models_to_load.append(loaded_model) models_to_load.append(loaded_model)
for loaded_model in models_to_load: for loaded_model in models_to_load:
@ -678,7 +679,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
free_mem = get_free_memory(device) free_mem = get_free_memory(device)
if free_mem < minimum_memory_required: if free_mem < minimum_memory_required:
models_l = free_memory(minimum_memory_required, device) models_l = free_memory(minimum_memory_required, device)
logging.info("{} models unloaded.".format(len(models_l))) logging.info("%d models unloaded.", len(models_l))
for loaded_model in models_to_load: for loaded_model in models_to_load:
model = loaded_model.model model = loaded_model.model
@ -724,7 +725,7 @@ def cleanup_models_gc():
for i in range(len(current_loaded_models)): for i in range(len(current_loaded_models)):
cur = current_loaded_models[i] cur = current_loaded_models[i]
if cur.is_dead(): if cur.is_dead():
logging.info("Potential memory leak detected with model {}, doing a full garbage collect, for maximum performance avoid circular references in the model code.".format(cur.real_model().__class__.__name__)) logging.info("Potential memory leak detected with model %s, doing a full garbage collect, for maximum performance avoid circular references in the model code.", cur.real_model().__class__.__name__)
do_gc = True do_gc = True
break break
@ -735,7 +736,7 @@ def cleanup_models_gc():
for i in range(len(current_loaded_models)): for i in range(len(current_loaded_models)):
cur = current_loaded_models[i] cur = current_loaded_models[i]
if cur.is_dead(): if cur.is_dead():
logging.warning("WARNING, memory leak with model {}. Please make sure it is not being referenced from somewhere.".format(cur.real_model().__class__.__name__)) logging.warning("WARNING, memory leak with model %s. Please make sure it is not being referenced from somewhere.", cur.real_model().__class__.__name__)
@ -1027,7 +1028,7 @@ if args.disable_async_offload:
NUM_STREAMS = 0 NUM_STREAMS = 0
if NUM_STREAMS > 0: if NUM_STREAMS > 0:
logging.info("Using async weight offloading with {} streams".format(NUM_STREAMS)) logging.info("Using async weight offloading with %d streams", NUM_STREAMS)
def current_stream(device): def current_stream(device):
if device is None: if device is None:
@ -1122,7 +1123,7 @@ if not args.disable_pinned_memory:
MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50% MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50%
else: else:
MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95 MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95
logging.info("Enabled pinned memory {}".format(MAX_PINNED_MEMORY // (1024 * 1024))) logging.info("Enabled pinned memory. %0.2f MB max", MAX_PINNED_MEMORY / (1024 * 1024))
PINNING_ALLOWED_TYPES = set(["Parameter", "QuantizedTensor"]) PINNING_ALLOWED_TYPES = set(["Parameter", "QuantizedTensor"])

View File

@ -778,7 +778,7 @@ class ModelPatcher:
if comfy.model_management.is_device_cuda(device_to): if comfy.model_management.is_device_cuda(device_to):
torch.cuda.synchronize() torch.cuda.synchronize()
logging.debug("lowvram: loaded module regularly {} {}".format(n, m)) logging.debug("lowvram: loaded module regularly %s to %s", n, m)
m.comfy_patched_weights = True m.comfy_patched_weights = True
for x in load_completely: for x in load_completely:
@ -791,10 +791,10 @@ class ModelPatcher:
self.pin_weight_to_device("{}.{}".format(n, param)) self.pin_weight_to_device("{}.{}".format(n, param))
if lowvram_counter > 0: if lowvram_counter > 0:
logging.info("loaded partially; {:.2f} MB usable, {:.2f} MB loaded, {:.2f} MB offloaded, {:.2f} MB buffer reserved, lowvram patches: {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), lowvram_mem_counter / (1024 * 1024), offload_buffer / (1024 * 1024), patch_counter)) logging.info("loaded partially; %.2f MB usable, %.2f MB loaded, %.2f MB offloaded, %.2f MB buffer reserved, lowvram patches: %d", lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), lowvram_mem_counter / (1024 * 1024), offload_buffer / (1024 * 1024), patch_counter)
self.model.model_lowvram = True self.model.model_lowvram = True
else: else:
logging.info("loaded completely; {:.2f} MB usable, {:.2f} MB loaded, full load: {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), full_load)) logging.info("loaded completely; %.2f MB usable, %.2f MB loaded, full load: %s", lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), full_load)
self.model.model_lowvram = False self.model.model_lowvram = False
if full_load: if full_load:
self.model.to(device_to) self.model.to(device_to)
@ -941,7 +941,7 @@ class ModelPatcher:
offload_buffer = max(offload_buffer, potential_offload) offload_buffer = max(offload_buffer, potential_offload)
offload_weight_factor.append(module_mem) offload_weight_factor.append(module_mem)
offload_weight_factor.pop(0) offload_weight_factor.pop(0)
logging.debug("freed {}".format(n)) logging.debug("freed %s", n)
for param in params: for param in params:
self.pin_weight_to_device("{}.{}".format(n, param)) self.pin_weight_to_device("{}.{}".format(n, param))
@ -951,7 +951,7 @@ class ModelPatcher:
self.model.lowvram_patch_counter += patch_counter self.model.lowvram_patch_counter += patch_counter
self.model.model_loaded_weight_memory -= memory_freed self.model.model_loaded_weight_memory -= memory_freed
self.model.model_offload_buffer_memory = offload_buffer self.model.model_offload_buffer_memory = offload_buffer
logging.info("Unloaded partially: {:.2f} MB freed, {:.2f} MB remains loaded, {:.2f} MB buffer reserved, lowvram patches: {}".format(memory_freed / (1024 * 1024), self.model.model_loaded_weight_memory / (1024 * 1024), offload_buffer / (1024 * 1024), self.model.lowvram_patch_counter)) logging.info("Unloaded partially: %.2f MB freed, %.2f MB remains loaded, %.2f MB buffer reserved, lowvram patches: %d", memory_freed / (1024 * 1024), self.model.model_loaded_weight_memory / (1024 * 1024), offload_buffer / (1024 * 1024), self.model.lowvram_patch_counter)
return memory_freed return memory_freed
def partially_load(self, device_to, extra_memory=0, force_patch_weights=False): def partially_load(self, device_to, extra_memory=0, force_patch_weights=False):
@ -1256,7 +1256,7 @@ class ModelPatcher:
model_sd_keys_set = set(model_sd_keys) model_sd_keys_set = set(model_sd_keys)
for key in cached_weights: for key in cached_weights:
if key not in model_sd_keys: if key not in model_sd_keys:
logging.warning(f"Cached hook could not patch. Key does not exist in model: {key}") logging.warning("Cached hook could not patch. Key does not exist in model: %s", key)
continue continue
self.patch_cached_hook_weights(cached_weights=cached_weights, key=key, memory_counter=memory_counter) self.patch_cached_hook_weights(cached_weights=cached_weights, key=key, memory_counter=memory_counter)
model_sd_keys_set.remove(key) model_sd_keys_set.remove(key)
@ -1269,7 +1269,7 @@ class ModelPatcher:
original_weights = self.get_key_patches() original_weights = self.get_key_patches()
for key in relevant_patches: for key in relevant_patches:
if key not in model_sd_keys: if key not in model_sd_keys:
logging.warning(f"Cached hook would not patch. Key does not exist in model: {key}") logging.warning("Cached hook would not patch. Key does not exist in model: %s", key)
continue continue
self.patch_hook_weight_to_device(hooks=hooks, combined_patches=relevant_patches, key=key, original_weights=original_weights, self.patch_hook_weight_to_device(hooks=hooks, combined_patches=relevant_patches, key=key, original_weights=original_weights,
memory_counter=memory_counter) memory_counter=memory_counter)

View File

@ -455,7 +455,7 @@ class fp8_ops(manual_cast):
if out is not None: if out is not None:
return out return out
except Exception as e: except Exception as e:
logging.info("Exception during fp8 op: {}".format(e)) logging.info("Exception during fp8 op: %s", str(e))
weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True)
x = torch.nn.functional.linear(input, weight, bias) x = torch.nn.functional.linear(input, weight, bias)

View File

@ -22,9 +22,9 @@ try:
ck.registry.disable("triton") ck.registry.disable("triton")
for k, v in ck.list_backends().items(): for k, v in ck.list_backends().items():
logging.info(f"Found comfy_kitchen backend {k}: {v}") logging.info("Found comfy_kitchen backend %s: %s", k, v)
except ImportError as e: except ImportError as e:
logging.error(f"Failed to import comfy_kitchen, Error: {e}, fp8 and fp4 support will not be available.") logging.error("Failed to import comfy_kitchen, Error: %s, fp8 and fp4 support will not be available.", e)
_CK_AVAILABLE = False _CK_AVAILABLE = False
class QuantizedTensor: class QuantizedTensor:

View File

@ -95,7 +95,7 @@ def load_lora_for_models(model, clip, lora, strength_model, strength_clip):
k1 = set(k1) k1 = set(k1)
for x in loaded: for x in loaded:
if (x not in k) and (x not in k1): if (x not in k) and (x not in k1):
logging.warning("NOT LOADED {}".format(x)) logging.warning("NOT LOADED %s", x)
return (new_modelpatcher, new_clip) return (new_modelpatcher, new_clip)
@ -139,27 +139,27 @@ class CLIP:
for c in state_dict: for c in state_dict:
m, u = self.load_sd(c) m, u = self.load_sd(c)
if len(m) > 0: if len(m) > 0:
logging.warning("clip missing: {}".format(m)) logging.warning("clip missing: %s", m)
if len(u) > 0: if len(u) > 0:
logging.debug("clip unexpected: {}".format(u)) logging.debug("clip unexpected: %s", u)
else: else:
m, u = self.load_sd(state_dict, full_model=True) m, u = self.load_sd(state_dict, full_model=True)
if len(m) > 0: if len(m) > 0:
m_filter = list(filter(lambda a: ".logit_scale" not in a and ".transformer.text_projection.weight" not in a, m)) m_filter = list(filter(lambda a: ".logit_scale" not in a and ".transformer.text_projection.weight" not in a, m))
if len(m_filter) > 0: if len(m_filter) > 0:
logging.warning("clip missing: {}".format(m)) logging.warning("clip missing: %s", m)
else: else:
logging.debug("clip missing: {}".format(m)) logging.debug("clip missing: %s", m)
if len(u) > 0: if len(u) > 0:
logging.debug("clip unexpected {}:".format(u)) logging.debug("clip unexpected %s:", u)
if params['device'] == load_device: if params['device'] == load_device:
model_management.load_models_gpu([self.patcher], force_full_load=True) model_management.load_models_gpu([self.patcher], force_full_load=True)
self.layer_idx = None self.layer_idx = None
self.use_clip_schedule = False self.use_clip_schedule = False
logging.info("CLIP/text encoder model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype)) logging.info("CLIP/text encoder model load device: %s, offload device: %s, current: %s, dtype: %s", load_device, offload_device, params['device'], dtype)
self.tokenizer_options = {} self.tokenizer_options = {}
def clone(self): def clone(self):
@ -664,10 +664,10 @@ class VAE:
m, u = self.first_stage_model.load_state_dict(sd, strict=False) m, u = self.first_stage_model.load_state_dict(sd, strict=False)
if len(m) > 0: if len(m) > 0:
logging.warning("Missing VAE keys {}".format(m)) logging.warning("Missing VAE keys %s", str(m))
if len(u) > 0: if len(u) > 0:
logging.debug("Leftover VAE keys {}".format(u)) logging.debug("Leftover VAE keys %s", str(u))
if device is None: if device is None:
device = model_management.vae_device() device = model_management.vae_device()
@ -680,7 +680,7 @@ class VAE:
self.output_device = model_management.intermediate_device() self.output_device = model_management.intermediate_device()
self.patcher = comfy.model_patcher.ModelPatcher(self.first_stage_model, load_device=self.device, offload_device=offload_device) self.patcher = comfy.model_patcher.ModelPatcher(self.first_stage_model, load_device=self.device, offload_device=offload_device)
logging.info("VAE load device: {}, offload device: {}, dtype: {}".format(self.device, offload_device, self.vae_dtype)) logging.info("VAE load device: %s, offload device: %s, dtype: %s", self.device, offload_device, self.vae_dtype)
self.model_size() self.model_size()
def model_size(self): def model_size(self):
@ -1440,7 +1440,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c
left_over = sd.keys() left_over = sd.keys()
if len(left_over) > 0: if len(left_over) > 0:
logging.debug("left over keys: {}".format(left_over)) logging.debug("left over keys: %s", left_over)
if output_model: if output_model:
model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device()) model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device())
@ -1510,7 +1510,7 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None):
if k in sd: if k in sd:
new_sd[diffusers_keys[k]] = sd.pop(k) new_sd[diffusers_keys[k]] = sd.pop(k)
else: else:
logging.warning("{} {}".format(diffusers_keys[k], k)) logging.warning("%s %s", diffusers_keys[k], k)
offload_device = model_management.unet_offload_device() offload_device = model_management.unet_offload_device()
unet_weight_dtype = list(model_config.supported_inference_dtypes) unet_weight_dtype = list(model_config.supported_inference_dtypes)
@ -1539,7 +1539,7 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None):
model.load_model_weights(new_sd, "") model.load_model_weights(new_sd, "")
left_over = sd.keys() left_over = sd.keys()
if len(left_over) > 0: if len(left_over) > 0:
logging.info("left over keys in diffusion model: {}".format(left_over)) logging.info("left over keys in diffusion model: %s", left_over)
return comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=offload_device) return comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=offload_device)
@ -1547,7 +1547,7 @@ def load_diffusion_model(unet_path, model_options={}):
sd, metadata = comfy.utils.load_torch_file(unet_path, return_metadata=True) sd, metadata = comfy.utils.load_torch_file(unet_path, return_metadata=True)
model = load_diffusion_model_state_dict(sd, model_options=model_options, metadata=metadata) model = load_diffusion_model_state_dict(sd, model_options=model_options, metadata=metadata)
if model is None: if model is None:
logging.error("ERROR UNSUPPORTED DIFFUSION MODEL {}".format(unet_path)) logging.error("ERROR UNSUPPORTED DIFFUSION MODEL %s", unet_path)
raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(unet_path, model_detection_error_hint(unet_path, sd))) raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(unet_path, model_detection_error_hint(unet_path, sd)))
return model return model
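Note that the RuntimeError message above keeps str.format on both sides of the diff: exception text is always rendered at the point the exception is raised, so there is nothing to defer. Only the logging calls benefit from the %-style rewrite. A small sketch with hypothetical values:

```python
unet_path = "models/unet/example.safetensors"                    # hypothetical path
hint = "file does not look like a diffusion model state dict"   # hypothetical hint text

# Eager formatting is fine here: the message is needed the moment we raise.
raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(unet_path, hint))
```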


@ -235,7 +235,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
else: else:
index += -1 index += -1
pad_extra += emb_shape pad_extra += emb_shape
logging.warning("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored {} != {}".format(emb.shape[-1], tokens_embed.shape[-1])) logging.warning("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored %s != %s", emb.shape[-1], tokens_embed.shape[-1])
if pad_extra > 0: if pad_extra > 0:
padd_embed = self.transformer.get_input_embeddings()(torch.tensor([[self.special_tokens["pad"]] * pad_extra], device=device, dtype=torch.long), out_dtype=torch.float32) padd_embed = self.transformer.get_input_embeddings()(torch.tensor([[self.special_tokens["pad"]] * pad_extra], device=device, dtype=torch.long), out_dtype=torch.float32)
@ -438,7 +438,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
except: except:
embed_out = safe_load_embed_zip(embed_path) embed_out = safe_load_embed_zip(embed_path)
except Exception: except Exception:
logging.warning("{}\n\nerror loading embedding, skipping loading: {}".format(traceback.format_exc(), embedding_name)) logging.warning("%s\n\nerror loading embedding, skipping loading: %s", traceback.format_exc(), embedding_name)
return None return None
if embed_out is None: if embed_out is None:
@ -570,7 +570,7 @@ class SDTokenizer:
embedding_name = word[len(self.embedding_identifier):].strip('\n') embedding_name = word[len(self.embedding_identifier):].strip('\n')
embed, leftover = self._try_get_embedding(embedding_name) embed, leftover = self._try_get_embedding(embedding_name)
if embed is None: if embed is None:
logging.warning(f"warning, embedding:{embedding_name} does not exist, ignoring") logging.warning("warning, embedding:%s does not exist, ignoring", embedding_name)
else: else:
if len(embed.shape) == 1: if len(embed.shape) == 1:
tokens.append([(embed, weight)]) tokens.append([(embed, weight)])


@ -120,5 +120,5 @@ class BASE:
self.manual_cast_dtype = manual_cast_dtype self.manual_cast_dtype = manual_cast_dtype
def __getattr__(self, name): def __getattr__(self, name):
logging.warning("\nWARNING, you accessed {} from the model config object which doesn't exist. Please fix your code.\n".format(name)) logging.warning("\nWARNING, you accessed %s from the model config object which doesn't exist. Please fix your code.\n", name)
return None return None


@ -80,7 +80,7 @@ class VoiceBpeTokenizer:
token_idx = self.encode(line, lang) token_idx = self.encode(line, lang)
lyric_token_idx = lyric_token_idx + token_idx + [2] lyric_token_idx = lyric_token_idx + token_idx + [2]
except Exception as e: except Exception as e:
logging.warning("tokenize error {} for line {} major_language {}".format(e, line, lang)) logging.warning("tokenize error %s for line %s major_language %s", e, line, lang)
return {"input_ids": lyric_token_idx} return {"input_ids": lyric_token_idx}
@staticmethod @staticmethod


@ -62,7 +62,7 @@ class HiDreamTEModel(torch.nn.Module):
else: else:
self.llama = None self.llama = None
logging.debug("Created HiDream text encoder with: clip_l {}, clip_g {}, t5xxl {}:{}, llama {}:{}".format(clip_l, clip_g, t5, dtype_t5, llama, dtype_llama)) logging.debug("Created HiDream text encoder with: clip_l %s, clip_g %s, t5xxl %s:%s, llama %s:%s", clip_l, clip_g, t5, dtype_t5, llama, dtype_llama)
def set_clip_options(self, options): def set_clip_options(self, options):
if self.clip_l is not None: if self.clip_l is not None:


@ -81,7 +81,7 @@ class SD3ClipModel(torch.nn.Module):
else: else:
self.t5xxl = None self.t5xxl = None
logging.debug("Created SD3 text encoder with: clip_l {}, clip_g {}, t5xxl {}:{}".format(clip_l, clip_g, t5, dtype_t5)) logging.debug("Created SD3 text encoder with: clip_l %s, clip_g %s, t5xxl %s:%s", clip_l, clip_g, t5, dtype_t5)
def set_clip_options(self, options): def set_clip_options(self, options):
if self.clip_l is not None: if self.clip_l is not None:


@ -86,7 +86,7 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False):
if safe_load or ALWAYS_SAFE_LOAD: if safe_load or ALWAYS_SAFE_LOAD:
pl_sd = torch.load(ckpt, map_location=device, weights_only=True, **torch_args) pl_sd = torch.load(ckpt, map_location=device, weights_only=True, **torch_args)
else: else:
logging.warning("WARNING: loading {} unsafely, upgrade your pytorch to 2.4 or newer to load this file safely.".format(ckpt)) logging.warning("WARNING: loading %s unsafely, upgrade your pytorch to 2.4 or newer to load this file safely.", ckpt)
pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle) pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle)
if "state_dict" in pl_sd: if "state_dict" in pl_sd:
sd = pl_sd["state_dict"] sd = pl_sd["state_dict"]


@ -111,5 +111,5 @@ class BOFTAdapter(WeightAdapterBase):
else: else:
weight += function((strength * lora_diff).type(weight.dtype)) weight += function((strength * lora_diff).type(weight.dtype))
except Exception as e: except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e)) logging.error("ERROR %s %s %s", self.name, key, e)
return weight return weight


@ -89,5 +89,5 @@ class GLoRAAdapter(WeightAdapterBase):
else: else:
weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
except Exception as e: except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e)) logging.error("ERROR %s %s %s", self.name, key, e)
return weight return weight


@ -228,5 +228,5 @@ class LoHaAdapter(WeightAdapterBase):
else: else:
weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
except Exception as e: except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e)) logging.error("ERROR %s %s %s", self.name, key, e)
return weight return weight


@ -216,5 +216,5 @@ class LoKrAdapter(WeightAdapterBase):
else: else:
weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
except Exception as e: except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e)) logging.error("ERROR %s %s %s", self.name, key, e)
return weight return weight


@ -208,5 +208,5 @@ class LoRAAdapter(WeightAdapterBase):
else: else:
weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) weight += function(((strength * alpha) * lora_diff).type(weight.dtype))
except Exception as e: except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e)) logging.error("ERROR %s %s %s", self.name, key, e)
return weight return weight


@ -157,5 +157,5 @@ class OFTAdapter(WeightAdapterBase):
else: else:
weight += function((strength * lora_diff).type(weight.dtype)) weight += function((strength * lora_diff).type(weight.dtype))
except Exception as e: except Exception as e:
logging.error("ERROR {} {} {}".format(self.name, key, e)) logging.error("ERROR %s %s %s", self.name, key, e)
return weight return weight


@ -31,10 +31,10 @@ def generate_stubs_for_module(module_name: str) -> None:
if api_class: if api_class:
# Generate the stub file # Generate the stub file
AsyncToSyncConverter.generate_stub_file(api_class, sync_class) AsyncToSyncConverter.generate_stub_file(api_class, sync_class)
logging.info(f"Generated stub file for {module_name}") logging.info("Generated stub file for %s", module_name)
else: else:
logging.warning( logging.warning(
f"Module {module_name} has ComfyAPISync but no ComfyAPI" "Module %s has ComfyAPISync but no ComfyAPI", module_name
) )
elif hasattr(module, "ComfyAPI"): elif hasattr(module, "ComfyAPI"):
@ -46,14 +46,14 @@ def generate_stubs_for_module(module_name: str) -> None:
# Generate the stub file # Generate the stub file
AsyncToSyncConverter.generate_stub_file(api_class, sync_class) AsyncToSyncConverter.generate_stub_file(api_class, sync_class)
logging.info(f"Generated stub file for {module_name}") logging.info("Generated stub file for %s", module_name)
else: else:
logging.warning( logging.warning(
f"Module {module_name} does not export ComfyAPI or ComfyAPISync" "Module %s does not export ComfyAPI or ComfyAPISync", module_name
) )
except Exception as e: except Exception as e:
logging.error(f"Failed to generate stub for {module_name}: {e}") logging.error("Failed to generate stub for %s: %s", module_name, e)
import traceback import traceback
traceback.print_exc() traceback.print_exc()
@ -73,7 +73,7 @@ def main():
if module_name not in api_modules: if module_name not in api_modules:
api_modules.append(module_name) api_modules.append(module_name)
logging.info(f"Found {len(api_modules)} API modules: {api_modules}") logging.info("Found %s API modules: %s", len(api_modules), api_modules)
# Generate stubs for each module # Generate stubs for each module
for module_name in api_modules: for module_name in api_modules:


@ -282,7 +282,7 @@ class AsyncToSyncConverter:
setattr(self._async_instance, attr_name, async_instance) setattr(self._async_instance, attr_name, async_instance)
except Exception as e: except Exception as e:
logging.warning( logging.warning(
f"Failed to create instance for {attr_name}: {e}" "Failed to create instance for %s: %s", attr_name, e
) )
# Handle other instance attributes that might not be annotated # Handle other instance attributes that might not be annotated
@ -962,7 +962,7 @@ class AsyncToSyncConverter:
seen.add(imp) seen.add(imp)
unique_imports.append(imp) unique_imports.append(imp)
else: else:
logging.warning(f"Duplicate import detected: {imp}") logging.warning("Duplicate import detected: %s", imp)
# Replace the placeholder with actual imports # Replace the placeholder with actual imports
stub_content[imports_placeholder_index : imports_placeholder_index + 1] = ( stub_content[imports_placeholder_index : imports_placeholder_index + 1] = (
@ -976,12 +976,12 @@ class AsyncToSyncConverter:
with open(sync_stub_path, "w") as f: with open(sync_stub_path, "w") as f:
f.write("\n".join(stub_content)) f.write("\n".join(stub_content))
logging.info(f"Generated stub file: {sync_stub_path}") logging.info("Generated stub file: %s", sync_stub_path)
except Exception as e: except Exception as e:
# If stub generation fails, log the error but don't break the main functionality # If stub generation fails, log the error but don't break the main functionality
logging.error( logging.error(
f"Error generating stub file for {sync_class.__name__}: {str(e)}" "Error generating stub file for %s: %s", sync_class.__name__, str(e)
) )
import traceback import traceback


@ -405,11 +405,11 @@ def match_audio_sample_rates(waveform_1, sample_rate_1, waveform_2, sample_rate_
if sample_rate_1 > sample_rate_2: if sample_rate_1 > sample_rate_2:
waveform_2 = torchaudio.functional.resample(waveform_2, sample_rate_2, sample_rate_1) waveform_2 = torchaudio.functional.resample(waveform_2, sample_rate_2, sample_rate_1)
output_sample_rate = sample_rate_1 output_sample_rate = sample_rate_1
logging.info(f"Resampling audio2 from {sample_rate_2}Hz to {sample_rate_1}Hz for merging.") logging.info("Resampling audio2 from %sHz to %sHz for merging.", sample_rate_2, sample_rate_1)
else: else:
waveform_1 = torchaudio.functional.resample(waveform_1, sample_rate_1, sample_rate_2) waveform_1 = torchaudio.functional.resample(waveform_1, sample_rate_1, sample_rate_2)
output_sample_rate = sample_rate_2 output_sample_rate = sample_rate_2
logging.info(f"Resampling audio1 from {sample_rate_1}Hz to {sample_rate_2}Hz for merging.") logging.info("Resampling audio1 from %sHz to %sHz for merging.", sample_rate_1, sample_rate_2)
else: else:
output_sample_rate = sample_rate_1 output_sample_rate = sample_rate_1
return waveform_1, waveform_2, output_sample_rate return waveform_1, waveform_2, output_sample_rate
@ -495,10 +495,10 @@ class AudioMerge(IO.ComfyNode):
length_2 = waveform_2.shape[-1] length_2 = waveform_2.shape[-1]
if length_2 > length_1: if length_2 > length_1:
logging.info(f"AudioMerge: Trimming audio2 from {length_2} to {length_1} samples to match audio1 length.") logging.info("AudioMerge: Trimming audio2 from %s to %s samples to match audio1 length.", length_2, length_1)
waveform_2 = waveform_2[..., :length_1] waveform_2 = waveform_2[..., :length_1]
elif length_2 < length_1: elif length_2 < length_1:
logging.info(f"AudioMerge: Padding audio2 from {length_2} to {length_1} samples to match audio1 length.") logging.info("AudioMerge: Padding audio2 from %s to %s samples to match audio1 length.", length_2, length_1)
pad_shape = list(waveform_2.shape) pad_shape = list(waveform_2.shape)
pad_shape[-1] = length_1 - length_2 pad_shape[-1] = length_1 - length_2
pad_tensor = torch.zeros(pad_shape, dtype=waveform_2.dtype, device=waveform_2.device) pad_tensor = torch.zeros(pad_shape, dtype=waveform_2.dtype, device=waveform_2.device)


@ -110,7 +110,7 @@ class LoadImageTextDataSetFromFolderNode(io.ComfyNode):
@classmethod @classmethod
def execute(cls, folder): def execute(cls, folder):
logging.info(f"Loading images from folder: {folder}") logging.info("Loading images from folder: %s", folder)
sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder) sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder)
valid_extensions = [".png", ".jpg", ".jpeg", ".webp"] valid_extensions = [".png", ".jpg", ".jpeg", ".webp"]
@ -149,7 +149,7 @@ class LoadImageTextDataSetFromFolderNode(io.ComfyNode):
output_tensor = load_and_process_images(image_files, sub_input_dir) output_tensor = load_and_process_images(image_files, sub_input_dir)
logging.info(f"Loaded {len(output_tensor)} images from {sub_input_dir}.") logging.info("Loaded %s images from %s.", len(output_tensor), sub_input_dir)
return io.NodeOutput(output_tensor, captions) return io.NodeOutput(output_tensor, captions)
@ -236,7 +236,7 @@ class SaveImageDataSetToFolderNode(io.ComfyNode):
output_dir = os.path.join(folder_paths.get_output_directory(), folder_name) output_dir = os.path.join(folder_paths.get_output_directory(), folder_name)
saved_files = save_images_to_folder(images, output_dir, filename_prefix) saved_files = save_images_to_folder(images, output_dir, filename_prefix)
logging.info(f"Saved {len(saved_files)} images to {output_dir}.") logging.info("Saved %s images to %s.", len(saved_files), output_dir)
return io.NodeOutput() return io.NodeOutput()
@ -283,7 +283,7 @@ class SaveImageTextDataSetToFolderNode(io.ComfyNode):
with open(caption_path, "w", encoding="utf-8") as f: with open(caption_path, "w", encoding="utf-8") as f:
f.write(caption) f.write(caption)
logging.info(f"Saved {len(saved_files)} images and captions to {output_dir}.") logging.info("Saved %s images and captions to %s.", len(saved_files), output_dir)
return io.NodeOutput() return io.NodeOutput()
@ -1002,7 +1002,7 @@ class ImageDeduplicationNode(ImageProcessingNode):
if similarity >= similarity_threshold: if similarity >= similarity_threshold:
is_duplicate = True is_duplicate = True
logging.info( logging.info(
f"Image {i} is similar to image {j} (similarity: {similarity:.3f}), skipping" "Image %d is similar to image %d (similarity: %.3f), skipping", i, j, similarity
) )
break break
@ -1012,7 +1012,7 @@ class ImageDeduplicationNode(ImageProcessingNode):
# Return only unique images # Return only unique images
unique_images = [images[i] for i in keep_indices] unique_images = [images[i] for i in keep_indices]
logging.info( logging.info(
f"Deduplication: kept {len(unique_images)} out of {len(images)} images" "Deduplication: kept %d out of %d images", len(unique_images), len(images)
) )
return unique_images return unique_images
@ -1086,7 +1086,7 @@ class ImageGridNode(ImageProcessingNode):
grid.paste(img, (x, y)) grid.paste(img, (x, y))
logging.info( logging.info(
f"Created {columns}x{rows} grid with {num_images} images ({grid_width}x{grid_height})" "Created %d x %d grid with %d images (%d x %d)", columns, rows, num_images, grid_width, grid_height
) )
return pil_to_tensor(grid) return pil_to_tensor(grid)
@ -1104,7 +1104,7 @@ class MergeImageListsNode(ImageProcessingNode):
"""Simply return the images list (already merged by input handling).""" """Simply return the images list (already merged by input handling)."""
# When multiple list inputs are connected, they're concatenated # When multiple list inputs are connected, they're concatenated
# For now, this is a simple pass-through # For now, this is a simple pass-through
logging.info(f"Merged image list contains {len(images)} images") logging.info("Merged image list contains %s images", len(images))
return images return images
@ -1121,7 +1121,7 @@ class MergeTextListsNode(TextProcessingNode):
"""Simply return the texts list (already merged by input handling).""" """Simply return the texts list (already merged by input handling)."""
# When multiple list inputs are connected, they're concatenated # When multiple list inputs are connected, they're concatenated
# For now, this is a simple pass-through # For now, this is a simple pass-through
logging.info(f"Merged text list contains {len(texts)} texts") logging.info("Merged text list contains %s texts", len(texts))
return texts return texts
@ -1214,10 +1214,10 @@ class ResolutionBucket(io.ComfyNode):
output_conditions.append(bucket_data["conditions"]) output_conditions.append(bucket_data["conditions"])
logging.info( logging.info(
f"Resolution bucket ({h}x{w}): {len(bucket_data['latents'])} samples" "Resolution bucket (%d x %d): %d samples", h, w, len(bucket_data["latents"])
) )
logging.info(f"Created {len(buckets)} resolution buckets from {len(flat_latents)} samples") logging.info("Created %s resolution buckets from %s samples", len(buckets), len(flat_latents))
return io.NodeOutput(output_latents, output_conditions) return io.NodeOutput(output_latents, output_conditions)
@ -1283,7 +1283,7 @@ class MakeTrainingDataset(io.ComfyNode):
) )
# Encode images with VAE # Encode images with VAE
logging.info(f"Encoding {num_images} images with VAE...") logging.info("Encoding %s images with VAE...", num_images)
latents_list = [] # list[{"samples": tensor}] latents_list = [] # list[{"samples": tensor}]
for img_tensor in images: for img_tensor in images:
# img_tensor is [1, H, W, 3] # img_tensor is [1, H, W, 3]
@ -1291,7 +1291,7 @@ class MakeTrainingDataset(io.ComfyNode):
latents_list.append({"samples": latent_tensor}) latents_list.append({"samples": latent_tensor})
# Encode texts with CLIP # Encode texts with CLIP
logging.info(f"Encoding {len(texts)} texts with CLIP...") logging.info("Encoding %s texts with CLIP...", len(texts))
conditioning_list = [] # list[list[cond]] conditioning_list = [] # list[list[cond]]
for text in texts: for text in texts:
if text == "": if text == "":
@ -1302,7 +1302,7 @@ class MakeTrainingDataset(io.ComfyNode):
conditioning_list.append(cond) conditioning_list.append(cond)
logging.info( logging.info(
f"Created dataset with {len(latents_list)} latents and {len(conditioning_list)} conditioning." "Created dataset with %d latents and %d conditioning.", len(latents_list), len(conditioning_list)
) )
return io.NodeOutput(latents_list, conditioning_list) return io.NodeOutput(latents_list, conditioning_list)
@ -1369,7 +1369,7 @@ class SaveTrainingDataset(io.ComfyNode):
num_shards = (num_samples + shard_size - 1) // shard_size # Ceiling division num_shards = (num_samples + shard_size - 1) // shard_size # Ceiling division
logging.info( logging.info(
f"Saving {num_samples} samples to {num_shards} shards in {output_dir}..." "Saving %d samples to %d shards in %s...", num_samples, num_shards, output_dir
) )
# Save data in shards # Save data in shards
@ -1391,7 +1391,7 @@ class SaveTrainingDataset(io.ComfyNode):
torch.save(shard_data, f) torch.save(shard_data, f)
logging.info( logging.info(
f"Saved shard {shard_idx + 1}/{num_shards}: {shard_filename} ({end_idx - start_idx} samples)" "Saved shard %d/%d: %s (%d samples)", shard_idx + 1, num_shards, shard_filename, end_idx - start_idx
) )
# Save metadata # Save metadata
@ -1404,7 +1404,7 @@ class SaveTrainingDataset(io.ComfyNode):
with open(metadata_path, "w") as f: with open(metadata_path, "w") as f:
json.dump(metadata, f, indent=2) json.dump(metadata, f, indent=2)
logging.info(f"Successfully saved {num_samples} samples to {output_dir}.") logging.info("Successfully saved %s samples to %s.", num_samples, output_dir)
return io.NodeOutput() return io.NodeOutput()
@ -1459,7 +1459,7 @@ class LoadTrainingDataset(io.ComfyNode):
if not shard_files: if not shard_files:
raise ValueError(f"No shard files found in {dataset_dir}") raise ValueError(f"No shard files found in {dataset_dir}")
logging.info(f"Loading {len(shard_files)} shards from {dataset_dir}...") logging.info("Loading %s shards from %s...", len(shard_files), dataset_dir)
# Load all shards # Load all shards
all_latents = [] # list[{"samples": tensor}] all_latents = [] # list[{"samples": tensor}]
@ -1474,10 +1474,10 @@ class LoadTrainingDataset(io.ComfyNode):
all_latents.extend(shard_data["latents"]) all_latents.extend(shard_data["latents"])
all_conditioning.extend(shard_data["conditioning"]) all_conditioning.extend(shard_data["conditioning"])
logging.info(f"Loaded {shard_file}: {len(shard_data['latents'])} samples") logging.info("Loaded %s: %s samples", shard_file, len(shard_data['latents']))
logging.info( logging.info(
f"Successfully loaded {len(all_latents)} samples from {dataset_dir}." "Successfully loaded %d samples from %s.", len(all_latents), dataset_dir
) )
return io.NodeOutput(all_latents, all_conditioning) return io.NodeOutput(all_latents, all_conditioning)
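Many of the converted calls above mix %s and %d. Both defer formatting; %d additionally requires an integer-compatible argument, and with lazy formatting a mismatched placeholder only surfaces when the record is emitted, where logging routes the error through Handler.handleError() instead of raising in the caller. A small sketch with hypothetical values:

```python
import logging

logging.basicConfig(level=logging.INFO)
saved_files = ["img_000.png", "img_001.png"]   # hypothetical file list
output_dir = "output/dataset"                  # hypothetical directory

logging.info("Saved %d images to %s.", len(saved_files), output_dir)

# A bad placeholder/argument pairing does not raise here; it is reported by the
# handler (a "--- Logging error ---" traceback on stderr) when the record is emitted.
logging.info("Saved %d images to %s.", "not-a-number", output_dir)
```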


@ -32,7 +32,7 @@ def easycache_forward_wrapper(executor, *args, **kwargs):
# if first cond marked this step for skipping, skip it and use appropriate cached values # if first cond marked this step for skipping, skip it and use appropriate cached values
if easycache.skip_current_step: if easycache.skip_current_step:
if easycache.verbose: if easycache.verbose:
logging.info(f"EasyCache [verbose] - was marked to skip this step by {easycache.first_cond_uuid}. Present uuids: {uuids}") logging.info("EasyCache [verbose] - was marked to skip this step by %s. Present uuids: %s", easycache.first_cond_uuid, uuids)
return easycache.apply_cache_diff(x, uuids) return easycache.apply_cache_diff(x, uuids)
if easycache.initial_step: if easycache.initial_step:
easycache.first_cond_uuid = uuids[0] easycache.first_cond_uuid = uuids[0]
@ -46,13 +46,13 @@ def easycache_forward_wrapper(executor, *args, **kwargs):
easycache.cumulative_change_rate += approx_output_change_rate easycache.cumulative_change_rate += approx_output_change_rate
if easycache.cumulative_change_rate < easycache.reuse_threshold: if easycache.cumulative_change_rate < easycache.reuse_threshold:
if easycache.verbose: if easycache.verbose:
logging.info(f"EasyCache [verbose] - skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}") logging.info("EasyCache [verbose] - skipping step; cumulative_change_rate: %s, reuse_threshold: %s", easycache.cumulative_change_rate, easycache.reuse_threshold)
# other conds should also skip this step, and instead use their cached values # other conds should also skip this step, and instead use their cached values
easycache.skip_current_step = True easycache.skip_current_step = True
return easycache.apply_cache_diff(x, uuids) return easycache.apply_cache_diff(x, uuids)
else: else:
if easycache.verbose: if easycache.verbose:
logging.info(f"EasyCache [verbose] - NOT skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}") logging.info("EasyCache [verbose] - NOT skipping step; cumulative_change_rate: %s, reuse_threshold: %s", easycache.cumulative_change_rate, easycache.reuse_threshold)
easycache.cumulative_change_rate = 0.0 easycache.cumulative_change_rate = 0.0
output: torch.Tensor = executor(*args, **kwargs) output: torch.Tensor = executor(*args, **kwargs)
@ -65,11 +65,11 @@ def easycache_forward_wrapper(executor, *args, **kwargs):
approx_output_change_rate = (easycache.relative_transformation_rate * input_change) / easycache.output_prev_norm approx_output_change_rate = (easycache.relative_transformation_rate * input_change) / easycache.output_prev_norm
easycache.approx_output_change_rates.append(approx_output_change_rate.item()) easycache.approx_output_change_rates.append(approx_output_change_rate.item())
if easycache.verbose: if easycache.verbose:
logging.info(f"EasyCache [verbose] - approx_output_change_rate: {approx_output_change_rate}") logging.info("EasyCache [verbose] - approx_output_change_rate: %s", approx_output_change_rate)
if input_change is not None: if input_change is not None:
easycache.relative_transformation_rate = output_change / input_change easycache.relative_transformation_rate = output_change / input_change
if easycache.verbose: if easycache.verbose:
logging.info(f"EasyCache [verbose] - output_change_rate: {output_change_rate}") logging.info("EasyCache [verbose] - output_change_rate: %s", output_change_rate)
# TODO: allow cache_diff to be offloaded # TODO: allow cache_diff to be offloaded
easycache.update_cache_diff(output, next_x_prev, uuids) easycache.update_cache_diff(output, next_x_prev, uuids)
if has_first_cond_uuid: if has_first_cond_uuid:
@ -77,7 +77,7 @@ def easycache_forward_wrapper(executor, *args, **kwargs):
easycache.output_prev_subsampled = easycache.subsample(output, uuids) easycache.output_prev_subsampled = easycache.subsample(output, uuids)
easycache.output_prev_norm = output.flatten().abs().mean() easycache.output_prev_norm = output.flatten().abs().mean()
if easycache.verbose: if easycache.verbose:
logging.info(f"EasyCache [verbose] - x_prev_subsampled: {easycache.x_prev_subsampled.shape}") logging.info("EasyCache [verbose] - x_prev_subsampled: %s", easycache.x_prev_subsampled.shape)
return output return output
def lazycache_predict_noise_wrapper(executor, *args, **kwargs): def lazycache_predict_noise_wrapper(executor, *args, **kwargs):
@ -102,13 +102,13 @@ def lazycache_predict_noise_wrapper(executor, *args, **kwargs):
easycache.cumulative_change_rate += approx_output_change_rate easycache.cumulative_change_rate += approx_output_change_rate
if easycache.cumulative_change_rate < easycache.reuse_threshold: if easycache.cumulative_change_rate < easycache.reuse_threshold:
if easycache.verbose: if easycache.verbose:
logging.info(f"LazyCache [verbose] - skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}") logging.info("LazyCache [verbose] - skipping step; cumulative_change_rate: %s, reuse_threshold: %s", easycache.cumulative_change_rate, easycache.reuse_threshold)
# other conds should also skip this step, and instead use their cached values # other conds should also skip this step, and instead use their cached values
easycache.skip_current_step = True easycache.skip_current_step = True
return easycache.apply_cache_diff(x) return easycache.apply_cache_diff(x)
else: else:
if easycache.verbose: if easycache.verbose:
logging.info(f"LazyCache [verbose] - NOT skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}") logging.info("LazyCache [verbose] - NOT skipping step; cumulative_change_rate: %s, reuse_threshold: %s", easycache.cumulative_change_rate, easycache.reuse_threshold)
easycache.cumulative_change_rate = 0.0 easycache.cumulative_change_rate = 0.0
output: torch.Tensor = executor(*args, **kwargs) output: torch.Tensor = executor(*args, **kwargs)
if easycache.has_output_prev_norm(): if easycache.has_output_prev_norm():
@ -120,18 +120,18 @@ def lazycache_predict_noise_wrapper(executor, *args, **kwargs):
approx_output_change_rate = (easycache.relative_transformation_rate * input_change) / easycache.output_prev_norm approx_output_change_rate = (easycache.relative_transformation_rate * input_change) / easycache.output_prev_norm
easycache.approx_output_change_rates.append(approx_output_change_rate.item()) easycache.approx_output_change_rates.append(approx_output_change_rate.item())
if easycache.verbose: if easycache.verbose:
logging.info(f"LazyCache [verbose] - approx_output_change_rate: {approx_output_change_rate}") logging.info("LazyCache [verbose] - approx_output_change_rate: %s", approx_output_change_rate)
if input_change is not None: if input_change is not None:
easycache.relative_transformation_rate = output_change / input_change easycache.relative_transformation_rate = output_change / input_change
if easycache.verbose: if easycache.verbose:
logging.info(f"LazyCache [verbose] - output_change_rate: {output_change_rate}") logging.info("LazyCache [verbose] - output_change_rate: %s", output_change_rate)
# TODO: allow cache_diff to be offloaded # TODO: allow cache_diff to be offloaded
easycache.update_cache_diff(output, next_x_prev) easycache.update_cache_diff(output, next_x_prev)
easycache.x_prev_subsampled = easycache.subsample(next_x_prev) easycache.x_prev_subsampled = easycache.subsample(next_x_prev)
easycache.output_prev_subsampled = easycache.subsample(output) easycache.output_prev_subsampled = easycache.subsample(output)
easycache.output_prev_norm = output.flatten().abs().mean() easycache.output_prev_norm = output.flatten().abs().mean()
if easycache.verbose: if easycache.verbose:
logging.info(f"LazyCache [verbose] - x_prev_subsampled: {easycache.x_prev_subsampled.shape}") logging.info("LazyCache [verbose] - x_prev_subsampled: %s", easycache.x_prev_subsampled.shape)
return output return output
def easycache_calc_cond_batch_wrapper(executor, *args, **kwargs): def easycache_calc_cond_batch_wrapper(executor, *args, **kwargs):
@ -152,22 +152,22 @@ def easycache_sample_wrapper(executor, *args, **kwargs):
# clone and prepare timesteps # clone and prepare timesteps
guider.model_options["transformer_options"]["easycache"] = guider.model_options["transformer_options"]["easycache"].clone().prepare_timesteps(guider.model_patcher.model.model_sampling) guider.model_options["transformer_options"]["easycache"] = guider.model_options["transformer_options"]["easycache"].clone().prepare_timesteps(guider.model_patcher.model.model_sampling)
easycache: Union[EasyCacheHolder, LazyCacheHolder] = guider.model_options['transformer_options']['easycache'] easycache: Union[EasyCacheHolder, LazyCacheHolder] = guider.model_options['transformer_options']['easycache']
logging.info(f"{easycache.name} enabled - threshold: {easycache.reuse_threshold}, start_percent: {easycache.start_percent}, end_percent: {easycache.end_percent}") logging.info("%s enabled - threshold: %s, start_percent: %s, end_percent: %s", easycache.name, easycache.reuse_threshold, easycache.start_percent, easycache.end_percent)
return executor(*args, **kwargs) return executor(*args, **kwargs)
finally: finally:
easycache = guider.model_options['transformer_options']['easycache'] easycache = guider.model_options['transformer_options']['easycache']
output_change_rates = easycache.output_change_rates output_change_rates = easycache.output_change_rates
approx_output_change_rates = easycache.approx_output_change_rates approx_output_change_rates = easycache.approx_output_change_rates
if easycache.verbose: if easycache.verbose:
logging.info(f"{easycache.name} [verbose] - output_change_rates {len(output_change_rates)}: {output_change_rates}") logging.info("%s [verbose] - output_change_rates %s: %s", easycache.name, len(output_change_rates), output_change_rates)
logging.info(f"{easycache.name} [verbose] - approx_output_change_rates {len(approx_output_change_rates)}: {approx_output_change_rates}") logging.info("%s [verbose] - approx_output_change_rates %s: %s", easycache.name, len(approx_output_change_rates), approx_output_change_rates)
total_steps = len(args[3])-1 total_steps = len(args[3])-1
# catch division by zero for log statement; sucks to crash after all sampling is done # catch division by zero for log statement; sucks to crash after all sampling is done
try: try:
speedup = total_steps/(total_steps-easycache.total_steps_skipped) speedup = total_steps/(total_steps-easycache.total_steps_skipped)
except ZeroDivisionError: except ZeroDivisionError:
speedup = 1.0 speedup = 1.0
logging.info(f"{easycache.name} - skipped {easycache.total_steps_skipped}/{total_steps} steps ({speedup:.2f}x speedup).") logging.info("%s - skipped %s/%s steps (%.2fx speedup).", easycache.name, easycache.total_steps_skipped, total_steps, speedup)
easycache.reset() easycache.reset()
guider.model_options = orig_model_options guider.model_options = orig_model_options
@ -300,7 +300,7 @@ class EasyCacheHolder:
return True return True
if metadata == self.state_metadata: if metadata == self.state_metadata:
return True return True
logging.warn(f"{self.name} - Tensor shape, dtype or device changed, resetting state") logging.warning("%s - Tensor shape, dtype or device changed, resetting state", self.name)
self.reset() self.reset()
return False return False
@ -435,7 +435,7 @@ class LazyCacheHolder:
return True return True
if metadata == self.state_metadata: if metadata == self.state_metadata:
return True return True
logging.warn(f"{self.name} - Tensor shape, dtype or device changed, resetting state") logging.warning("%s - Tensor shape, dtype or device changed, resetting state", self.name)
self.reset() self.reset()
return False return False
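Besides the lazy-formatting change, the two calls above also switch from logging.warn to logging.warning: warn is a long-deprecated alias that emits a DeprecationWarning in recent CPython versions, and warning is the supported spelling. A minimal sketch (the holder name is hypothetical):

```python
import logging

name = "EasyCache"  # hypothetical holder name

# Deprecated alias; recent CPython emits a DeprecationWarning for logging.warn().
logging.warn("%s - Tensor shape, dtype or device changed, resetting state", name)

# Supported spelling, with lazy %-style arguments.
logging.warning("%s - Tensor shape, dtype or device changed, resetting state", name)
```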


@ -56,7 +56,7 @@ class FreeU(IO.ComfyNode):
try: try:
hsp = Fourier_filter(hsp, threshold=1, scale=scale[1]) hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
except: except:
logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device)) logging.warning("Device %s does not support the torch.fft functions used in the FreeU node, switching to CPU.", hsp.device)
on_cpu_devices[hsp.device] = True on_cpu_devices[hsp.device] = True
hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
else: else:
@ -110,7 +110,7 @@ class FreeU_V2(IO.ComfyNode):
try: try:
hsp = Fourier_filter(hsp, threshold=1, scale=scale[1]) hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
except: except:
logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device)) logging.warning("Device %s does not support the torch.fft functions used in the FreeU node, switching to CPU.", hsp.device)
on_cpu_devices[hsp.device] = True on_cpu_devices[hsp.device] = True
hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
else: else:


@ -540,7 +540,7 @@ class CreateHookKeyframesInterpolated:
is_first = False is_first = False
prev_hook_kf.add(comfy.hooks.HookKeyframe(strength=strength, start_percent=percent, guarantee_steps=guarantee_steps)) prev_hook_kf.add(comfy.hooks.HookKeyframe(strength=strength, start_percent=percent, guarantee_steps=guarantee_steps))
if print_keyframes: if print_keyframes:
logging.info(f"Hook Keyframe - start_percent:{percent} = {strength}") logging.info("Hook Keyframe - start_percent:%s = %s", percent, strength)
return (prev_hook_kf,) return (prev_hook_kf,)
class CreateHookKeyframesFromFloats: class CreateHookKeyframesFromFloats:
@ -589,7 +589,7 @@ class CreateHookKeyframesFromFloats:
is_first = False is_first = False
prev_hook_kf.add(comfy.hooks.HookKeyframe(strength=strength, start_percent=percent, guarantee_steps=guarantee_steps)) prev_hook_kf.add(comfy.hooks.HookKeyframe(strength=strength, start_percent=percent, guarantee_steps=guarantee_steps))
if print_keyframes: if print_keyframes:
logging.info(f"Hook Keyframe - start_percent:{percent} = {strength}") logging.info("Hook Keyframe - start_percent:%s = %s", percent, strength)
return (prev_hook_kf,) return (prev_hook_kf,)
#------------------------------------------ #------------------------------------------
########################################### ###########################################


@ -27,7 +27,7 @@ def load_hypernetwork_patch(path, strength):
} }
if activation_func not in valid_activation: if activation_func not in valid_activation:
logging.error("Unsupported Hypernetwork format, if you report it I might implement it. {} {} {} {} {} {}".format(path, activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout)) logging.error("Unsupported Hypernetwork format, if you report it I might implement it. %s %s %s %s %s %s", path, activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout)
return None return None
out = {} out = {}


@ -456,10 +456,10 @@ class ReplaceVideoLatentFrames(io.ComfyNode):
if index < 0: if index < 0:
index = dest_frames + index index = dest_frames + index
if index > dest_frames: if index > dest_frames:
logging.warning(f"ReplaceVideoLatentFrames: Index {index} is out of bounds for destination latent frames {dest_frames}.") logging.warning("ReplaceVideoLatentFrames: Index %s is out of bounds for destination latent frames %s.", index, dest_frames)
return io.NodeOutput(destination) return io.NodeOutput(destination)
if index + source_frames > dest_frames: if index + source_frames > dest_frames:
logging.warning(f"ReplaceVideoLatentFrames: Source latent frames {source_frames} do not fit within destination latent frames {dest_frames} at the specified index {index}.") logging.warning("ReplaceVideoLatentFrames: Source latent frames %s do not fit within destination latent frames %s at the specified index %s.", source_frames, dest_frames, index)
return io.NodeOutput(destination) return io.NodeOutput(destination)
s = source.copy() s = source.copy()
s_source = source["samples"] s_source = source["samples"]

View File

@ -65,7 +65,7 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora
output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu() output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu()
output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu() output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu()
except: except:
logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k)) logging.warning("Could not generate lora weights for key %s, is the weight difference a zero?", k)
elif lora_type == LORAType.FULL_DIFF: elif lora_type == LORAType.FULL_DIFF:
output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu() output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu()


@ -390,7 +390,7 @@ def find_all_highest_child_module_with_forward(
model, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict) model, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict)
): ):
result.append(model) result.append(model)
logging.debug(f"Found module with forward: {name} ({model.__class__.__name__})") logging.debug("Found module with forward: %s (%s)", name, model.__class__.__name__)
return result return result
name = name or "root" name = name or "root"
for next_name, child in model.named_children(): for next_name, child in model.named_children():
@ -498,9 +498,9 @@ def _prepare_latents_and_count(latents, dtype, bucket_mode):
num_images = sum(t.shape[0] for t in latents) num_images = sum(t.shape[0] for t in latents)
multi_res = False # Not using multi_res path in bucket mode multi_res = False # Not using multi_res path in bucket mode
logging.info(f"Bucket mode: {num_buckets} buckets, {num_images} total samples") logging.info("Bucket mode: %s buckets, %s total samples", num_buckets, num_images)
for i, lat in enumerate(latents): for i, lat in enumerate(latents):
logging.info(f" Bucket {i}: shape {lat.shape}") logging.info(" Bucket %s: shape %s", i, lat.shape)
return latents, num_images, multi_res return latents, num_images, multi_res
# Non-bucket mode # Non-bucket mode
@ -509,7 +509,7 @@ def _prepare_latents_and_count(latents, dtype, bucket_mode):
latents = [t.to(dtype) for t in latents] latents = [t.to(dtype) for t in latents]
for latent in latents: for latent in latents:
all_shapes.add(latent.shape) all_shapes.add(latent.shape)
logging.info(f"Latent shapes: {all_shapes}") logging.info("Latent shapes: %s", all_shapes)
if len(all_shapes) > 1: if len(all_shapes) > 1:
multi_res = True multi_res = True
else: else:
@ -521,7 +521,7 @@ def _prepare_latents_and_count(latents, dtype, bucket_mode):
num_images = latents.shape[0] num_images = latents.shape[0]
multi_res = False multi_res = False
else: else:
logging.error(f"Invalid latents type: {type(latents)}") logging.error("Invalid latents type: %s", type(latents))
num_images = 0 num_images = 0
multi_res = False multi_res = False
@ -545,7 +545,7 @@ def _validate_and_expand_conditioning(positive, num_images, bucket_mode):
if bucket_mode: if bucket_mode:
return positive # Skip validation in bucket mode return positive # Skip validation in bucket mode
logging.info(f"Total Images: {num_images}, Total Captions: {len(positive)}") logging.info("Total Images: %s, Total Captions: %s", num_images, len(positive))
if len(positive) == 1 and num_images > 1: if len(positive) == 1 and num_images > 1:
return positive * num_images return positive * num_images
elif len(positive) != num_images: elif len(positive) != num_images:


@ -83,7 +83,7 @@ class IsChangedCache:
is_changed = await resolve_map_node_over_list_results(is_changed) is_changed = await resolve_map_node_over_list_results(is_changed)
node["is_changed"] = [None if isinstance(x, ExecutionBlocker) else x for x in is_changed] node["is_changed"] = [None if isinstance(x, ExecutionBlocker) else x for x in is_changed]
except Exception as e: except Exception as e:
logging.warning("WARNING: {}".format(e)) logging.warning("WARNING: %s", e)
node["is_changed"] = float("NaN") node["is_changed"] = float("NaN")
finally: finally:
self.is_changed[node_id] = node["is_changed"] self.is_changed[node_id] = node["is_changed"]
@ -595,19 +595,19 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
for name, inputs in input_data_all.items(): for name, inputs in input_data_all.items():
input_data_formatted[name] = [format_value(x) for x in inputs] input_data_formatted[name] = [format_value(x) for x in inputs]
logging.error(f"!!! Exception during processing !!! {ex}") logging.error("!!! Exception during processing !!! %s", ex)
logging.error(traceback.format_exc()) logging.error(traceback.format_exc())
tips = "" tips = ""
if isinstance(ex, comfy.model_management.OOM_EXCEPTION): if isinstance(ex, comfy.model_management.OOM_EXCEPTION):
tips = "This error means you ran out of memory on your GPU.\n\nTIPS: If the workflow worked before you might have accidentally set the batch_size to a large number." tips = "This error means you ran out of memory on your GPU.\n\nTIPS: If the workflow worked before you might have accidentally set the batch_size to a large number."
logging.info("Memory summary: {}".format(comfy.model_management.debug_memory_summary())) logging.info("Memory summary: %s", comfy.model_management.debug_memory_summary())
logging.error("Got an OOM, unloading all loaded models.") logging.error("Got an OOM, unloading all loaded models.")
comfy.model_management.unload_all_models() comfy.model_management.unload_all_models()
error_details = { error_details = {
"node_id": real_node_id, "node_id": real_node_id,
"exception_message": "{}\n{}".format(ex, tips), "exception_message": "%s\n%s" % (ex, tips),
"exception_type": exception_type, "exception_type": exception_type,
"traceback": traceback.format_tb(tb), "traceback": traceback.format_tb(tb),
"current_inputs": input_data_formatted "current_inputs": input_data_formatted
@ -1061,11 +1061,11 @@ async def validate_prompt(prompt_id, prompt, partial_execution_list: Union[list[
if valid is True: if valid is True:
good_outputs.add(o) good_outputs.add(o)
else: else:
logging.error(f"Failed to validate prompt for output {o}:") logging.error("Failed to validate prompt for output %s:", o)
if len(reasons) > 0: if len(reasons) > 0:
logging.error("* (prompt):") logging.error("* (prompt):")
for reason in reasons: for reason in reasons:
logging.error(f" - {reason['message']}: {reason['details']}") logging.error(" - %s: %s", reason['message'], reason['details'])
errors += [(o, reasons)] errors += [(o, reasons)]
for node_id, result in validated.items(): for node_id, result in validated.items():
valid = result[0] valid = result[0]
@ -1081,9 +1081,9 @@ async def validate_prompt(prompt_id, prompt, partial_execution_list: Union[list[
"dependent_outputs": [], "dependent_outputs": [],
"class_type": class_type "class_type": class_type
} }
logging.error(f"* {class_type} {node_id}:") logging.error("* %s %s:", class_type, node_id)
for reason in reasons: for reason in reasons:
logging.error(f" - {reason['message']}: {reason['details']}") logging.error(" - %s: %s", reason['message'], reason['details'])
node_errors[node_id]["dependent_outputs"].append(o) node_errors[node_id]["dependent_outputs"].append(o)
logging.error("Output will be ignored") logging.error("Output will be ignored")


@ -314,9 +314,9 @@ def recursive_search(directory: str, excluded_dir_names: list[str] | None=None)
try: try:
dirs[directory] = os.path.getmtime(directory) dirs[directory] = os.path.getmtime(directory)
except FileNotFoundError: except FileNotFoundError:
logging.warning(f"Warning: Unable to access {directory}. Skipping this path.") logging.warning("Warning: Unable to access %s. Skipping this path.", directory)
logging.debug("recursive file list on directory {}".format(directory)) logging.debug("recursive file list on directory %s", directory)
dirpath: str dirpath: str
subdirs: list[str] subdirs: list[str]
filenames: list[str] filenames: list[str]
@ -328,7 +328,7 @@ def recursive_search(directory: str, excluded_dir_names: list[str] | None=None)
relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory) relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
result.append(relative_path) result.append(relative_path)
except: except:
logging.warning(f"Warning: Unable to access {file_name}. Skipping this file.") logging.warning("Warning: Unable to access %s. Skipping this file.", file_name)
continue continue
for d in subdirs: for d in subdirs:
@ -336,9 +336,9 @@ def recursive_search(directory: str, excluded_dir_names: list[str] | None=None)
try: try:
dirs[path] = os.path.getmtime(path) dirs[path] = os.path.getmtime(path)
except FileNotFoundError: except FileNotFoundError:
logging.warning(f"Warning: Unable to access {path}. Skipping this path.") logging.warning("Warning: Unable to access %s. Skipping this path.", path)
continue continue
logging.debug("found {} files".format(len(result))) logging.debug("found %d files", len(result))
return result, dirs return result, dirs
def filter_files_extensions(files: Collection[str], extensions: Collection[str]) -> list[str]: def filter_files_extensions(files: Collection[str], extensions: Collection[str]) -> list[str]:
@ -361,7 +361,7 @@ def get_full_path(folder_name: str, filename: str) -> str | None:
if os.path.isfile(full_path): if os.path.isfile(full_path):
return full_path return full_path
elif os.path.islink(full_path): elif os.path.islink(full_path):
logging.warning("WARNING path {} exists but doesn't link anywhere, skipping.".format(full_path)) logging.warning("WARNING path %s exists but doesn't link anywhere, skipping.", full_path)
return None return None


@ -102,7 +102,7 @@ def get_previewer(device, latent_format):
taesd = TAESD(None, taesd_decoder_path, latent_channels=latent_format.latent_channels).to(device) taesd = TAESD(None, taesd_decoder_path, latent_channels=latent_format.latent_channels).to(device)
previewer = TAESDPreviewerImpl(taesd) previewer = TAESDPreviewerImpl(taesd)
else: else:
logging.warning("Warning: TAESD previews enabled, but could not find models/vae_approx/{}".format(latent_format.taesd_decoder_name)) logging.warning("Warning: TAESD previews enabled, but could not find models/vae_approx/%s", latent_format.taesd_decoder_name)
if previewer is None: if previewer is None:
if latent_format.latent_rgb_factors is not None: if latent_format.latent_rgb_factors is not None:

main.py

@ -41,12 +41,11 @@ if __name__ == "__main__":
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device) os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device) os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device)
os.environ["ASCEND_RT_VISIBLE_DEVICES"] = str(args.cuda_device) os.environ["ASCEND_RT_VISIBLE_DEVICES"] = str(args.cuda_device)
logging.info("Set cuda device to: {}".format(args.cuda_device)) logging.info("Set cuda device to: %s", args.cuda_device)
if args.oneapi_device_selector is not None: if args.oneapi_device_selector is not None:
os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector
logging.info("Set oneapi device selector to: {}".format(args.oneapi_device_selector)) logging.info("Set oneapi device selector to: %s", args.oneapi_device_selector)
if args.deterministic: if args.deterministic:
if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ: if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8" os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
@ -58,7 +57,12 @@ if __name__ == "__main__":
def handle_comfyui_manager_unavailable(): def handle_comfyui_manager_unavailable():
if not args.windows_standalone_build: if not args.windows_standalone_build:
logging.warning(f"\n\nYou appear to be running comfyui-manager from source, this is not recommended. Please install comfyui-manager using the following command:\ncommand:\n\t{sys.executable} -m pip install --pre comfyui_manager\n") logging.warning("""
You appear to be running comfyui-manager from source, this is not recommended. Please install comfyui-manager using the following command:
command:
%s -m pip install --pre comfyui_manager
""", sys.executable)
args.enable_manager = False args.enable_manager = False
@ -85,7 +89,7 @@ def apply_custom_paths():
# --output-directory, --input-directory, --user-directory # --output-directory, --input-directory, --user-directory
if args.output_directory: if args.output_directory:
output_dir = os.path.abspath(args.output_directory) output_dir = os.path.abspath(args.output_directory)
logging.info(f"Setting output directory to: {output_dir}") logging.info("Setting output directory to: %s", output_dir)
folder_paths.set_output_directory(output_dir) folder_paths.set_output_directory(output_dir)
# These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. nodes # These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. nodes
@ -98,12 +102,12 @@ def apply_custom_paths():
if args.input_directory: if args.input_directory:
input_dir = os.path.abspath(args.input_directory) input_dir = os.path.abspath(args.input_directory)
logging.info(f"Setting input directory to: {input_dir}") logging.info("Setting input directory to: %s", input_dir)
folder_paths.set_input_directory(input_dir) folder_paths.set_input_directory(input_dir)
if args.user_directory: if args.user_directory:
user_dir = os.path.abspath(args.user_directory) user_dir = os.path.abspath(args.user_directory)
logging.info(f"Setting user directory to: {user_dir}") logging.info("Setting user directory to: %s", user_dir)
folder_paths.set_user_directory(user_dir) folder_paths.set_user_directory(user_dir)
@ -119,7 +123,7 @@ def execute_prestartup_script():
spec.loader.exec_module(module) spec.loader.exec_module(module)
return True return True
except Exception as e: except Exception as e:
logging.error(f"Failed to execute startup-script: {script_path} / {e}") logging.error("Failed to execute startup-script: %s / %s", script_path, e)
return False return False
node_paths = folder_paths.get_folder_paths("custom_nodes") node_paths = folder_paths.get_folder_paths("custom_nodes")
@ -140,7 +144,7 @@ def execute_prestartup_script():
script_path = os.path.join(module_path, "prestartup_script.py") script_path = os.path.join(module_path, "prestartup_script.py")
if os.path.exists(script_path): if os.path.exists(script_path):
if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes: if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes:
logging.info(f"Prestartup Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes") logging.info("Prestartup Skipping %s due to disable_all_custom_nodes and whitelist_custom_nodes", possible_module)
continue continue
time_before = time.perf_counter() time_before = time.perf_counter()
success = execute_script(script_path) success = execute_script(script_path)
@ -152,7 +156,7 @@ def execute_prestartup_script():
import_message = "" import_message = ""
else: else:
import_message = " (PRESTARTUP FAILED)" import_message = " (PRESTARTUP FAILED)"
logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1])) logging.info("%6.1f seconds%s: %s", n[0], import_message, n[1])
logging.info("") logging.info("")
apply_custom_paths() apply_custom_paths()
@ -246,9 +250,9 @@ def prompt_worker(q, server_instance):
# Log Time in a more readable way after 10 minutes # Log Time in a more readable way after 10 minutes
if execution_time > 600: if execution_time > 600:
execution_time = time.strftime("%H:%M:%S", time.gmtime(execution_time)) execution_time = time.strftime("%H:%M:%S", time.gmtime(execution_time))
logging.info(f"Prompt executed in {execution_time}") logging.info("Prompt executed in %s", execution_time)
else: else:
logging.info("Prompt executed in {:.2f} seconds".format(execution_time)) logging.info("Prompt executed in %.2f seconds", execution_time)
flags = q.get_flags() flags = q.get_flags()
free_memory = flags.get("free_memory", False) free_memory = flags.get("free_memory", False)
@ -325,7 +329,7 @@ def setup_database():
if dependencies_available(): if dependencies_available():
init_db() init_db()
except Exception as e: except Exception as e:
logging.error(f"Failed to initialize database. Please ensure you have installed the latest requirements. If the error persists, please report this as in future the database will be required: {e}") logging.error("Failed to initialize database. Please ensure you have installed the latest requirements. If the error persists, please report this as in future the database will be required: %s", e)
def start_comfyui(asyncio_loop=None): def start_comfyui(asyncio_loop=None):
@ -335,7 +339,7 @@ def start_comfyui(asyncio_loop=None):
""" """
if args.temp_directory: if args.temp_directory:
temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp") temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
logging.info(f"Setting temp directory to: {temp_dir}") logging.info("Setting temp directory to: %s", temp_dir)
folder_paths.set_temp_directory(temp_dir) folder_paths.set_temp_directory(temp_dir)
cleanup_temp() cleanup_temp()
@ -394,8 +398,8 @@ def start_comfyui(asyncio_loop=None):
if __name__ == "__main__": if __name__ == "__main__":
# Running directly, just start ComfyUI. # Running directly, just start ComfyUI.
logging.info("Python version: {}".format(sys.version)) logging.info("Python version: %s", sys.version)
logging.info("ComfyUI version: {}".format(comfyui_version.__version__)) logging.info("ComfyUI version: %s", comfyui_version.__version__)
if sys.version_info.major == 3 and sys.version_info.minor < 10: if sys.version_info.major == 3 and sys.version_info.minor < 10:
logging.warning("WARNING: You are using a python version older than 3.10, please upgrade to a newer one. 3.12 and above is recommended.") logging.warning("WARNING: You are using a python version older than 3.10, please upgrade to a newer one. 3.12 and above is recommended.")

View File

@ -2142,7 +2142,7 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom
sys_module_name = module_path.replace(".", "_x_") sys_module_name = module_path.replace(".", "_x_")
try: try:
logging.debug("Trying to load custom node {}".format(module_path)) logging.debug("Trying to load custom node %s", module_path)
if os.path.isfile(module_path): if os.path.isfile(module_path):
module_spec = importlib.util.spec_from_file_location(sys_module_name, module_path) module_spec = importlib.util.spec_from_file_location(sys_module_name, module_path)
module_dir = os.path.split(module_path)[0] module_dir = os.path.split(module_path)[0]
@ -2171,9 +2171,9 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom
EXTENSION_WEB_DIRS[project_name] = web_dir_path EXTENSION_WEB_DIRS[project_name] = web_dir_path
logging.info("Automatically register web folder {} for {}".format(web_dir_name, project_name)) logging.info("Automatically register web folder %s for %s", web_dir_name, project_name)
except Exception as e: except Exception as e:
logging.warning(f"Unable to parse pyproject.toml due to lack dependency pydantic-settings, please run 'pip install -r requirements.txt': {e}") logging.warning("Unable to parse pyproject.toml due to lack dependency pydantic-settings, please run 'pip install -r requirements.txt': %s", e)
if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None: if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None:
web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY"))) web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY")))
@ -2193,7 +2193,7 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom
elif hasattr(module, "comfy_entrypoint"): elif hasattr(module, "comfy_entrypoint"):
entrypoint = getattr(module, "comfy_entrypoint") entrypoint = getattr(module, "comfy_entrypoint")
if not callable(entrypoint): if not callable(entrypoint):
logging.warning(f"comfy_entrypoint in {module_path} is not callable, skipping.") logging.warning("comfy_entrypoint in %s is not callable, skipping.", module_path)
return False return False
try: try:
if inspect.iscoroutinefunction(entrypoint): if inspect.iscoroutinefunction(entrypoint):
@ -2201,11 +2201,11 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom
else: else:
extension = entrypoint() extension = entrypoint()
if not isinstance(extension, ComfyExtension): if not isinstance(extension, ComfyExtension):
logging.warning(f"comfy_entrypoint in {module_path} did not return a ComfyExtension, skipping.") logging.warning("comfy_entrypoint in %s did not return a ComfyExtension, skipping.", module_path)
return False return False
node_list = await extension.get_node_list() node_list = await extension.get_node_list()
if not isinstance(node_list, list): if not isinstance(node_list, list):
logging.warning(f"comfy_entrypoint in {module_path} did not return a list of nodes, skipping.") logging.warning("comfy_entrypoint in %s did not return a list of nodes, skipping.", module_path)
return False return False
for node_cls in node_list: for node_cls in node_list:
node_cls: io.ComfyNode node_cls: io.ComfyNode
@ -2217,14 +2217,14 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom
NODE_DISPLAY_NAME_MAPPINGS[schema.node_id] = schema.display_name NODE_DISPLAY_NAME_MAPPINGS[schema.node_id] = schema.display_name
return True return True
except Exception as e: except Exception as e:
logging.warning(f"Error while calling comfy_entrypoint in {module_path}: {e}") logging.warning("Error while calling comfy_entrypoint in %s: %s", module_path, e)
return False return False
else: else:
logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or NODES_LIST (need one).") logging.warning("Skip %s module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or NODES_LIST (need one).", module_path)
return False return False
except Exception as e: except Exception as e:
logging.warning(traceback.format_exc()) logging.warning(traceback.format_exc())
logging.warning(f"Cannot import {module_path} module for custom nodes: {e}") logging.warning("Cannot import %s module for custom nodes: %s", module_path, e)
return False return False
async def init_external_custom_nodes(): async def init_external_custom_nodes():
@ -2252,12 +2252,12 @@ async def init_external_custom_nodes():
if module_path.endswith(".disabled"): if module_path.endswith(".disabled"):
continue continue
if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes: if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes:
logging.info(f"Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes") logging.info("Skipping %s due to disable_all_custom_nodes and whitelist_custom_nodes", possible_module)
continue continue
if args.enable_manager: if args.enable_manager:
if comfyui_manager.should_be_disabled(module_path): if comfyui_manager.should_be_disabled(module_path):
logging.info(f"Blocked by policy: {module_path}") logging.info("Blocked by policy: %s", module_path)
continue continue
time_before = time.perf_counter() time_before = time.perf_counter()
@ -2271,7 +2271,7 @@ async def init_external_custom_nodes():
import_message = "" import_message = ""
else: else:
import_message = " (IMPORT FAILED)" import_message = " (IMPORT FAILED)"
logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1])) logging.info("%6.1f seconds%s: %s", n[0], import_message, n[1])
logging.info("") logging.info("")
async def init_builtin_extra_nodes(): async def init_builtin_extra_nodes():
@ -2440,7 +2440,7 @@ async def init_extra_nodes(init_custom_nodes=True, init_api_nodes=True):
if len(import_failed_api) > 0: if len(import_failed_api) > 0:
logging.warning("WARNING: some comfy_api_nodes/ nodes did not import correctly. This may be because they are missing some dependencies.\n") logging.warning("WARNING: some comfy_api_nodes/ nodes did not import correctly. This may be because they are missing some dependencies.\n")
for node in import_failed_api: for node in import_failed_api:
logging.warning("IMPORT FAILED: {}".format(node)) logging.warning("IMPORT FAILED: %s", node)
logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.") logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.")
if args.windows_standalone_build: if args.windows_standalone_build:
logging.warning("Please run the update script: update/update_comfyui.bat") logging.warning("Please run the update script: update/update_comfyui.bat")
@ -2451,7 +2451,7 @@ async def init_extra_nodes(init_custom_nodes=True, init_api_nodes=True):
if len(import_failed) > 0: if len(import_failed) > 0:
logging.warning("WARNING: some comfy_extras/ nodes did not import correctly. This may be because they are missing some dependencies.\n") logging.warning("WARNING: some comfy_extras/ nodes did not import correctly. This may be because they are missing some dependencies.\n")
for node in import_failed: for node in import_failed:
logging.warning("IMPORT FAILED: {}".format(node)) logging.warning("IMPORT FAILED: %s", node)
# logging.warning("IMPORT FAILED: {}".format(node))
logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.") logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.")
if args.windows_standalone_build: if args.windows_standalone_build:
logging.warning("Please run the update script: update/update_comfyui.bat") logging.warning("Please run the update script: update/update_comfyui.bat")

View File

@ -21,6 +21,7 @@ lint.select = [
# The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names. # The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names.
# See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f # See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
"F", "F",
"G" # logging rules
] ]
lint.ignore = ["E501", "E722", "E731", "E712", "E402", "E741"] lint.ignore = ["E501", "E722", "E731", "E712", "E402", "E741"]

605 server.py

File diff suppressed because it is too large

View File

@ -30,5 +30,5 @@ def load_extra_path_config(yaml_path):
elif not os.path.isabs(full_path): elif not os.path.isabs(full_path):
full_path = os.path.abspath(os.path.join(yaml_dir, y)) full_path = os.path.abspath(os.path.join(yaml_dir, y))
normalized_path = os.path.normpath(full_path) normalized_path = os.path.normpath(full_path)
logging.info("Adding extra search path {} {}".format(x, normalized_path)) logging.info("Adding extra search path %s: %s", x, normalized_path)
folder_paths.add_model_folder_path(x, normalized_path, is_default) folder_paths.add_model_folder_path(x, normalized_path, is_default)
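Beyond satisfying the linter, the lazy form defers string construction until a handler actually emits the record, so log calls below the configured level cost almost nothing. A minimal sketch demonstrating the difference (illustrative only; the Expensive class is hypothetical):

import logging

class Expensive:
    """Stand-in for an object whose string form is costly to build."""
    calls = 0
    def __str__(self):
        Expensive.calls += 1
        return "expensive value"

logging.basicConfig(level=logging.INFO)

logging.debug("value: %s", Expensive())  # DEBUG is filtered out: __str__ never runs
logging.debug(f"value: {Expensive()}")   # f-string (what the G rule flags): __str__ runs anyway
print(Expensive.calls)                   # 1 -- only the eager call paid the formatting cost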