diff --git a/app/app_settings.py b/app/app_settings.py
index c7ac73bf6..2a3080d81 100644
--- a/app/app_settings.py
+++ b/app/app_settings.py
@@ -22,7 +22,7 @@ class AppSettings():
             with open(file) as f:
                 return json.load(f)
         except:
-            logging.error(f"The user settings file is corrupted: {file}")
+            logging.error("The user settings file is corrupted: %s", file)
             return {}
         else:
             return {}
diff --git a/app/custom_node_manager.py b/app/custom_node_manager.py
index 281febca9..9d562d4c7 100644
--- a/app/custom_node_manager.py
+++ b/app/custom_node_manager.py
@@ -27,7 +27,7 @@ def safe_load_json_file(file_path: str) -> dict:
         with open(file_path, "r", encoding="utf-8") as f:
             return json.load(f)
     except json.JSONDecodeError:
-        logging.error(f"Error loading {file_path}")
+        logging.error("Error loading %s", file_path)
        return {}
diff --git a/app/database/db.py b/app/database/db.py
index 1de8b80ed..d3ae27fa8 100644
--- a/app/database/db.py
+++ b/app/database/db.py
@@ -67,7 +67,7 @@ def get_db_path():

 def init_db():
     db_url = args.database_url
-    logging.debug(f"Database URL: {db_url}")
+    logging.debug("Database URL: %s", db_url)
     db_path = get_db_path()
     db_exists = os.path.exists(db_path)
@@ -95,7 +95,7 @@ def init_db():
    try:
        command.upgrade(config, target_rev)
-        logging.info(f"Database upgraded from {current_rev} to {target_rev}")
+        logging.info("Database upgraded from %s to %s", current_rev, target_rev)
    except Exception as e:
        if backup_path:
            # Restore the database from backup if upgrade fails
diff --git a/app/frontend_management.py b/app/frontend_management.py
index bdaa85812..6ac59ca15 100644
--- a/app/frontend_management.py
+++ b/app/frontend_management.py
@@ -53,7 +53,7 @@ def get_required_frontend_version():
             if line.startswith("comfyui-frontend-package=="):
                 version_str = line.split("==")[-1]
                 if not is_valid_version(version_str):
-                    logging.error(f"Invalid version format in requirements.txt: {version_str}")
+                    logging.error("Invalid version format in requirements.txt: %s", version_str)
                     return None
                 return version_str
         logging.error("comfyui-frontend-package not found in requirements.txt")
@@ -62,7 +62,7 @@
         logging.error("requirements.txt not found. Cannot determine required frontend version.")
         return None
     except Exception as e:
-        logging.error(f"Error reading requirements.txt: {e}")
+        logging.error("Error reading requirements.txt: %s", e)
         return None
@@ -89,7 +89,7 @@ ________________________________________________________________________
         else:
             logging.info("ComfyUI frontend version: {}".format(frontend_version_str))
     except Exception as e:
-        logging.error(f"Failed to check frontend version: {e}")
+        logging.error("Failed to check frontend version: %s", e)

 REQUEST_TIMEOUT = 10  # seconds
@@ -225,7 +225,7 @@ class FrontendManager:
             if line.startswith("comfyui-workflow-templates=="):
                 version_str = line.split("==")[-1]
                 if not is_valid_version(version_str):
-                    logging.error(f"Invalid templates version format in requirements.txt: {version_str}")
+                    logging.error("Invalid templates version format in requirements.txt: %s", version_str)
                     return None
                 return version_str
         logging.error("comfyui-workflow-templates not found in requirements.txt")
@@ -234,7 +234,7 @@ class FrontendManager:
         logging.error("requirements.txt not found. Cannot determine required templates version.")
         return None
     except Exception as e:
-        logging.error(f"Error reading requirements.txt: {e}")
+        logging.error("Error reading requirements.txt: %s", e)
         return None

     @classmethod
@@ -282,7 +282,7 @@ comfyui-workflow-templates is not installed.
         try:
             template_entries = list(iter_templates())
         except Exception as exc:
-            logging.error(f"Failed to enumerate workflow templates: {exc}")
+            logging.error("Failed to enumerate workflow templates: %s", exc)
             return None

         asset_map: Dict[str, str] = {}
@@ -293,7 +293,7 @@ comfyui-workflow-templates is not installed.
                         entry.template_id, asset.filename
                     )
         except Exception as exc:
-            logging.error(f"Failed to resolve template asset paths: {exc}")
+            logging.error("Failed to resolve template asset paths: %s", exc)
             return None

         if not asset_map:
diff --git a/app/model_manager.py b/app/model_manager.py
index f124d1117..e32d0ff27 100644
--- a/app/model_manager.py
+++ b/app/model_manager.py
@@ -144,7 +144,7 @@ class ModelFileManager:
                     result.append(file_info)
                 except Exception as e:
-                    logging.warning(f"Warning: Unable to access {file_name}. Error: {e}. Skipping this file.")
+                    logging.warning("Warning: Unable to access %s. Error: %s. Skipping this file.", file_name, e)
                     continue

         for d in subdirs:
@@ -152,7 +152,7 @@
             try:
                 dirs[path] = os.path.getmtime(path)
             except FileNotFoundError:
-                logging.warning(f"Warning: Unable to access {path}. Skipping this path.")
+                logging.warning("Warning: Unable to access %s. Skipping this path.", path)
                 continue

         return result, dirs, time.perf_counter()
diff --git a/app/user_manager.py b/app/user_manager.py
index e2c00dab2..8f49ac56a 100644
--- a/app/user_manager.py
+++ b/app/user_manager.py
@@ -241,7 +241,7 @@ class UserManager():
         try:
             requested_rel_path = parse.unquote(requested_rel_path)
         except Exception as e:
-            logging.warning(f"Failed to decode path parameter: {requested_rel_path}, Error: {e}")
+            logging.warning("Failed to decode path parameter: %s, Error: %s", requested_rel_path, e)
             return web.Response(status=400, text="Invalid characters in path parameter")
@@ -256,7 +256,7 @@
         except KeyError as e:
             # Invalid user detected by get_request_user_id inside get_request_user_filepath
-            logging.warning(f"Access denied for user: {e}")
+            logging.warning("Access denied for user: %s", e)
             return web.Response(status=403, text="Invalid user specified in request")
@@ -304,11 +304,11 @@ class UserManager():
                         entry_info["size"] = stats.st_size
                         entry_info["modified"] = stats.st_mtime
                     except OSError as stat_error:
-                        logging.warning(f"Could not stat file {file_path}: {stat_error}")
+                        logging.warning("Could not stat file %s: %s", file_path, stat_error)
                         pass  # Include file with available info
                 results.append(entry_info)
         except OSError as e:
-            logging.error(f"Error listing directory {target_abs_path}: {e}")
+            logging.error("Error listing directory %s: %s", target_abs_path, e)
             return web.Response(status=500, text="Error reading directory contents")

         # Sort results alphabetically, directories first then files
@@ -380,7 +380,7 @@ class UserManager():
             with open(path, "wb") as f:
                 f.write(body)
         except OSError as e:
-            logging.warning(f"Error saving file '{path}': {e}")
+            logging.warning("Error saving file '%s': %s", path, e)
             return web.Response(
                 status=400,
                 reason="Invalid filename. Please avoid special characters like :\\/*?\"<>|"
@@ -444,7 +444,7 @@ class UserManager():
         if not overwrite and os.path.exists(dest):
             return web.Response(status=409, text="File already exists")

-        logging.info(f"moving '{source}' -> '{dest}'")
+        logging.info("moving '%s' -> '%s'", source, dest)
         shutil.move(source, dest)

         user_path = self.get_request_user_filepath(request, None)
diff --git a/comfy/context_windows.py b/comfy/context_windows.py
index 2f82d51da..732214d98 100644
--- a/comfy/context_windows.py
+++ b/comfy/context_windows.py
@@ -124,9 +124,9 @@ class IndexListContextHandler(ContextHandlerABC):
     def should_use_context(self, model: BaseModel, conds: list[list[dict]], x_in: torch.Tensor, timestep: torch.Tensor, model_options: dict[str]) -> bool:
         # for now, assume first dim is batch - should have stored on BaseModel in actual implementation
         if x_in.size(self.dim) > self.context_length:
-            logging.info(f"Using context windows {self.context_length} with overlap {self.context_overlap} for {x_in.size(self.dim)} frames.")
+            logging.info("Using context windows %d with overlap %d for %d frames.", self.context_length, self.context_overlap, x_in.size(self.dim))
             if self.cond_retain_index_list:
-                logging.info(f"Retaining original cond for indexes: {self.cond_retain_index_list}")
+                logging.info("Retaining original cond for indexes: %s", self.cond_retain_index_list)
             return True
         return False
@@ -143,7 +143,7 @@
             # if multiple conds, split based on primary region
             if self.split_conds_to_windows and len(cond_in) > 1:
                 region = window.get_region_index(len(cond_in))
-                logging.info(f"Splitting conds to windows; using region {region} for window {window.index_list[0]}-{window.index_list[-1]} with center ratio {window.center_ratio:.3f}")
+                logging.info("Splitting conds to windows; using region %d for window %d-%d with center ratio %.3f", region, window.index_list[0], window.index_list[-1], window.center_ratio)
                 cond_in = [cond_in[region]]
             # cond object is a list containing a dict - outer list is irrelevant, so just loop through it
             for actual_cond in cond_in:
diff --git a/comfy/diffusers_convert.py b/comfy/diffusers_convert.py
index fb9495348..7408c892c 100644
--- a/comfy/diffusers_convert.py
+++ b/comfy/diffusers_convert.py
@@ -86,7 +86,7 @@ def convert_vae_state_dict(vae_state_dict):
     for k, v in new_state_dict.items():
         for weight_name in weights_to_convert:
             if f"mid.attn_1.{weight_name}.weight" in k:
-                logging.debug(f"Reshaping {k} for SD format")
+                logging.debug("Reshaping %s for SD format", k)
                 new_state_dict[k] = reshape_weight_for_sd(v, conv3d=conv3d)
     return new_state_dict
diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py
index c57e081e4..ff511c3e8 100644
--- a/comfy/extra_samplers/uni_pc.py
+++ b/comfy/extra_samplers/uni_pc.py
@@ -475,7 +475,7 @@ class UniPC:
             return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)

     def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
-        logging.info(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
+        logging.info("using unified predictor-corrector with order %s (solver type: vary coeff)", order)
         ns = self.noise_schedule
         assert order <= len(model_prev_list)
diff --git a/comfy/hooks.py b/comfy/hooks.py
index 1a76c7ba4..09cd5e68c 100644
--- a/comfy/hooks.py
+++ b/comfy/hooks.py
@@ -666,7 +666,7 @@ def load_hook_lora_for_models(model: ModelPatcher, clip: CLIP, lora: dict[str, t
     k1 = set(k1)
     for x in loaded:
         if (x not in k) and (x not in k1):
-            logging.warning(f"NOT LOADED {x}")
+            logging.warning("NOT LOADED %s", x)
     return (new_modelpatcher, new_clip, hook_group)

 def _combine_hooks_from_values(c_dict: dict[str, HookGroup], values: dict[str, HookGroup], cache: dict[tuple[HookGroup, HookGroup], HookGroup]):
diff --git a/comfy/ldm/cosmos/model.py b/comfy/ldm/cosmos/model.py
index 52ef7ef43..58e2d1b52 100644
--- a/comfy/ldm/cosmos/model.py
+++ b/comfy/ldm/cosmos/model.py
@@ -216,7 +216,7 @@ class GeneralDIT(nn.Module):
         else:
             raise ValueError(f"Unknown pos_emb_cls {self.pos_emb_cls}")

-        logging.debug(f"Building positional embedding with {self.pos_emb_cls} class, impl {cls_type}")
+        logging.debug("Building positional embedding with %s class, impl %s", self.pos_emb_cls, cls_type)
         kwargs = dict(
             model_channels=self.model_channels,
             len_h=self.max_img_h // self.patch_spatial,
diff --git a/comfy/ldm/cosmos/predict2.py b/comfy/ldm/cosmos/predict2.py
index 07a4fc79f..931dcb52a 100644
--- a/comfy/ldm/cosmos/predict2.py
+++ b/comfy/ldm/cosmos/predict2.py
@@ -718,7 +718,7 @@ class MiniTrainDIT(nn.Module):
         else:
             raise ValueError(f"Unknown pos_emb_cls {self.pos_emb_cls}")

-        logging.debug(f"Building positional embedding with {self.pos_emb_cls} class, impl {cls_type}")
+        logging.debug("Building positional embedding with %s class, impl %s", self.pos_emb_cls, cls_type)
         kwargs = dict(
             model_channels=self.model_channels,
             len_h=self.max_img_h // self.patch_spatial,
diff --git a/comfy/ldm/cosmos/vae.py b/comfy/ldm/cosmos/vae.py
index d64f292de..15457e26a 100644
--- a/comfy/ldm/cosmos/vae.py
+++ b/comfy/ldm/cosmos/vae.py
@@ -90,7 +90,7 @@ class CausalContinuousVideoTokenizer(nn.Module):
         self.distribution = IdentityDistribution()  # ContinuousFormulation[formulation_name].value()

         num_parameters = sum(param.numel() for param in self.parameters())
-        logging.debug(f"model={self.name}, num_parameters={num_parameters:,}")
+        logging.debug("model=%s, num_parameters=%d", self.name, num_parameters)
         logging.debug(
             f"z_channels={z_channels}, latent_channels={self.latent_channels}."
         )
diff --git a/comfy/ldm/lightricks/vae/causal_audio_autoencoder.py b/comfy/ldm/lightricks/vae/causal_audio_autoencoder.py
index f12b9bb53..7aaf1c87d 100644
--- a/comfy/ldm/lightricks/vae/causal_audio_autoencoder.py
+++ b/comfy/ldm/lightricks/vae/causal_audio_autoencoder.py
@@ -401,9 +401,9 @@ def make_attn(in_channels, attn_type="vanilla", norm_type="group"):
     attn_type = AttentionType.str_to_enum(attn_type)

     if attn_type != AttentionType.NONE:
-        logging.info(f"making attention of type '{attn_type.value}' with {in_channels} in_channels")
+        logging.info("making attention of type '%s' with %s in_channels", attn_type.value, in_channels)
     else:
-        logging.info(f"making identity attention with {in_channels} in_channels")
+        logging.info("making identity attention with %s in_channels", in_channels)

     match attn_type:
         case AttentionType.VANILLA:
diff --git a/comfy/ldm/models/autoencoder.py b/comfy/ldm/models/autoencoder.py
index 4f50810dc..0a148467a 100644
--- a/comfy/ldm/models/autoencoder.py
+++ b/comfy/ldm/models/autoencoder.py
@@ -58,7 +58,7 @@ class AbstractAutoencoder(torch.nn.Module):
         if self.use_ema:
             self.model_ema = LitEma(self, decay=ema_decay)
-            logging.info(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+            logging.info("Keeping EMAs of %s.", len(list(self.model_ema.buffers())))

     def get_input(self, batch) -> Any:
         raise NotImplementedError()
@@ -74,14 +74,14 @@
             self.model_ema.store(self.parameters())
             self.model_ema.copy_to(self)
             if context is not None:
-                logging.info(f"{context}: Switched to EMA weights")
+                logging.info("%s: Switched to EMA weights", context)
         try:
             yield None
         finally:
             if self.use_ema:
                 self.model_ema.restore(self.parameters())
                 if context is not None:
-                    logging.info(f"{context}: Restored training weights")
+                    logging.info("%s: Restored training weights", context)

     def encode(self, *args, **kwargs) -> torch.Tensor:
         raise NotImplementedError("encode()-method of abstract base class called")
@@ -90,7 +90,7 @@
         raise NotImplementedError("decode()-method of abstract base class called")

     def instantiate_optimizer_from_config(self, params, lr, cfg):
-        logging.info(f"loading >>> {cfg['target']} <<< optimizer from config")
+        logging.info("loading >>> %s <<< optimizer from config", cfg['target'])
         return get_obj_from_str(cfg["target"])(
             params, lr=lr, **cfg.get("params", dict())
         )
diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index ccf690945..e355e47f7 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -10,7 +10,7 @@ import logging
 import functools

 from .diffusionmodules.util import AlphaBlender, timestep_embedding
-from .sub_quadratic_attention import efficient_dot_product_attention
+from comfy.ldm.modules.sub_quadratic_attention import efficient_dot_product_attention

 from comfy import model_management
@@ -25,7 +25,11 @@ try:
 except ImportError as e:
     if model_management.sage_attention_enabled():
         if e.name == "sageattention":
-            logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention")
+            logging.error("""
+
+To use the `--use-sage-attention` feature, the `sageattention` package must be installed first.
+command:
+	%s -m pip install sageattention""", sys.executable)
         else:
             raise e
         exit(-1)
@@ -43,7 +47,11 @@
     FLASH_ATTENTION_IS_AVAILABLE = True
 except ImportError:
     if model_management.flash_attention_enabled():
-        logging.error(f"\n\nTo use the `--use-flash-attention` feature, the `flash-attn` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install flash-attn")
+        logging.error("""
+
+To use the `--use-flash-attention` feature, the `flash-attn` package must be installed first.
+command:
+	%s -m pip install flash-attn""", sys.executable)
         exit(-1)

 REGISTERED_ATTENTION_FUNCTIONS = {}
@@ -52,7 +60,7 @@ def register_attention_function(name: str, func: Callable):
     if name not in REGISTERED_ATTENTION_FUNCTIONS:
         REGISTERED_ATTENTION_FUNCTIONS[name] = func
     else:
-        logging.warning(f"Attention function {name} already registered, skipping registration.")
+        logging.warning("Attention function %s already registered, skipping registration.", name)

 def get_attention_function(name: str, default: Any=...) -> Union[Callable, None]:
     if name == "optimized":
@@ -707,7 +715,7 @@
                 causal=False,
             ).transpose(1, 2)
         except Exception as e:
-            logging.warning(f"Flash Attention failed, using default SDPA: {e}")
+            logging.warning("Flash Attention failed, using default SDPA: %s", e)
             out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
     if not skip_output_reshape:
         out = (
diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py
index 233011dc9..d45dbbd38 100644
--- a/comfy/ldm/modules/diffusionmodules/util.py
+++ b/comfy/ldm/modules/diffusionmodules/util.py
@@ -131,7 +131,7 @@ def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timestep
     # add one to get the final alpha values right (the ones from first scale to data during sampling)
     steps_out = ddim_timesteps + 1
     if verbose:
-        logging.info(f'Selected timesteps for ddim sampler: {steps_out}')
+        logging.info("Selected timesteps for ddim sampler: %s", steps_out)

     return steps_out
@@ -143,7 +143,7 @@ def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
     # according the the formula provided in https://arxiv.org/abs/2010.02502
     sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
     if verbose:
-        logging.info(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
+        logging.info("Selected alphas for ddim sampler: a_t: %s; a_(t-1): %s", alphas, alphas_prev)
         logging.info(f'For the chosen value of eta, which is {eta}, '
                      f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
     return sigmas, alphas, alphas_prev
diff --git a/comfy/ldm/util.py b/comfy/ldm/util.py
index 304936ff4..d35e35fbb 100644
--- a/comfy/ldm/util.py
+++ b/comfy/ldm/util.py
@@ -66,7 +66,7 @@ def mean_flat(tensor):

 def count_params(model, verbose=False):
     total_params = sum(p.numel() for p in model.parameters())
     if verbose:
-        logging.info(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
+        logging.info("%s has %.2f M params.", model.__class__.__name__, total_params * 1e-06)
     return total_params
diff --git a/comfy/model_management.py b/comfy/model_management.py
index 928282092..9fc950a61 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -415,7 +415,7 @@ if cpu_state != CPUState.GPU:
 if cpu_state == CPUState.MPS:
     vram_state = VRAMState.SHARED

-logging.info(f"Set vram state to: {vram_state.name}")
+logging.info("Set vram state to: %s", vram_state.name)

 DISABLE_SMART_MEMORY = args.disable_smart_memory
@@ -602,7 +602,7 @@ def free_memory(memory_required, device, keep_loaded=[]):
            if free_mem > memory_required:
                break
            memory_to_free = memory_required - free_mem
-            logging.debug(f"Unloading {current_loaded_models[i].model.model.__class__.__name__}")
+            logging.debug("Unloading %s", current_loaded_models[i].model.model.__class__.__name__)
            if current_loaded_models[i].model_unload(memory_to_free):
                unloaded_model.append(i)
@@ -652,7 +652,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
            models_to_load.append(loaded)
        else:
            if hasattr(x, "model"):
-                logging.info(f"Requested to load {x.model.__class__.__name__}")
+                logging.info("Requested to load %s", x.model.__class__.__name__)
            models_to_load.append(loaded_model)

    for loaded_model in models_to_load:
diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 93d26c690..2bcea5207 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -1256,7 +1256,7 @@ class ModelPatcher:
         model_sd_keys_set = set(model_sd_keys)
         for key in cached_weights:
             if key not in model_sd_keys:
-                logging.warning(f"Cached hook could not patch. Key does not exist in model: {key}")
+                logging.warning("Cached hook could not patch. Key does not exist in model: %s", key)
                 continue
             self.patch_cached_hook_weights(cached_weights=cached_weights, key=key, memory_counter=memory_counter)
             model_sd_keys_set.remove(key)
@@ -1269,7 +1269,7 @@
             original_weights = self.get_key_patches()
             for key in relevant_patches:
                 if key not in model_sd_keys:
-                    logging.warning(f"Cached hook would not patch. Key does not exist in model: {key}")
+                    logging.warning("Cached hook would not patch. Key does not exist in model: %s", key)
                     continue
                 self.patch_hook_weight_to_device(hooks=hooks, combined_patches=relevant_patches, key=key, original_weights=original_weights, memory_counter=memory_counter)
diff --git a/comfy/quant_ops.py b/comfy/quant_ops.py
index 5a17bc6f5..94363db56 100644
--- a/comfy/quant_ops.py
+++ b/comfy/quant_ops.py
@@ -22,9 +22,9 @@ try:
     ck.registry.disable("triton")

     for k, v in ck.list_backends().items():
-        logging.info(f"Found comfy_kitchen backend {k}: {v}")
+        logging.info("Found comfy_kitchen backend %s: %s", k, v)
 except ImportError as e:
-    logging.error(f"Failed to import comfy_kitchen, Error: {e}, fp8 and fp4 support will not be available.")
+    logging.error("Failed to import comfy_kitchen, Error: %s, fp8 and fp4 support will not be available.", e)
     _CK_AVAILABLE = False

 class QuantizedTensor:
diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py
index c512ca5d0..faaa0a59b 100644
--- a/comfy/sd1_clip.py
+++ b/comfy/sd1_clip.py
@@ -570,7 +570,7 @@ class SDTokenizer:
                     embedding_name = word[len(self.embedding_identifier):].strip('\n')
                     embed, leftover = self._try_get_embedding(embedding_name)
                     if embed is None:
-                        logging.warning(f"warning, embedding:{embedding_name} does not exist, ignoring")
+                        logging.warning("warning, embedding:%s does not exist, ignoring", embedding_name)
                     else:
                         if len(embed.shape) == 1:
                             tokens.append([(embed, weight)])
diff --git a/comfy_api/generate_api_stubs.py b/comfy_api/generate_api_stubs.py
index 604a7eced..7520664b2 100644
--- a/comfy_api/generate_api_stubs.py
+++ b/comfy_api/generate_api_stubs.py
@@ -31,7 +31,7 @@ def generate_stubs_for_module(module_name: str) -> None:
         if api_class:
             # Generate the stub file
             AsyncToSyncConverter.generate_stub_file(api_class, sync_class)
-            logging.info(f"Generated stub file for {module_name}")
+            logging.info("Generated stub file for %s", module_name)
         else:
             logging.warning(
                 f"Module {module_name} has ComfyAPISync but no ComfyAPI"
             )
@@ -46,14 +46,14 @@
             # Generate the stub file
             AsyncToSyncConverter.generate_stub_file(api_class, sync_class)
-            logging.info(f"Generated stub file for {module_name}")
+            logging.info("Generated stub file for %s", module_name)
         else:
             logging.warning(
                 f"Module {module_name} does not export ComfyAPI or ComfyAPISync"
             )

     except Exception as e:
-        logging.error(f"Failed to generate stub for {module_name}: {e}")
+        logging.error("Failed to generate stub for %s: %s", module_name, e)
         import traceback
         traceback.print_exc()
@@ -73,7 +73,7 @@ def main():
         if module_name not in api_modules:
             api_modules.append(module_name)

-    logging.info(f"Found {len(api_modules)} API modules: {api_modules}")
+    logging.info("Found %s API modules: %s", len(api_modules), api_modules)

     # Generate stubs for each module
     for module_name in api_modules:
diff --git a/comfy_api/internal/async_to_sync.py b/comfy_api/internal/async_to_sync.py
index c9b0576e1..3b44f5469 100644
--- a/comfy_api/internal/async_to_sync.py
+++ b/comfy_api/internal/async_to_sync.py
@@ -962,7 +962,7 @@ class AsyncToSyncConverter:
                     seen.add(imp)
                     unique_imports.append(imp)
                 else:
-                    logging.warning(f"Duplicate import detected: {imp}")
+                    logging.warning("Duplicate import detected: %s", imp)

             # Replace the placeholder with actual imports
             stub_content[imports_placeholder_index : imports_placeholder_index + 1] = (
@@ -976,7 +976,7 @@
             with open(sync_stub_path, "w") as f:
                 f.write("\n".join(stub_content))

-            logging.info(f"Generated stub file: {sync_stub_path}")
+            logging.info("Generated stub file: %s", sync_stub_path)

         except Exception as e:
             # If stub generation fails, log the error but don't break the main functionality
diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py
index 94ad5e8a8..e1ae524b3 100644
--- a/comfy_extras/nodes_audio.py
+++ b/comfy_extras/nodes_audio.py
@@ -405,11 +405,11 @@ def match_audio_sample_rates(waveform_1, sample_rate_1, waveform_2, sample_rate_
         if sample_rate_1 > sample_rate_2:
             waveform_2 = torchaudio.functional.resample(waveform_2, sample_rate_2, sample_rate_1)
             output_sample_rate = sample_rate_1
-            logging.info(f"Resampling audio2 from {sample_rate_2}Hz to {sample_rate_1}Hz for merging.")
+            logging.info("Resampling audio2 from %sHz to %sHz for merging.", sample_rate_2, sample_rate_1)
         else:
             waveform_1 = torchaudio.functional.resample(waveform_1, sample_rate_1, sample_rate_2)
             output_sample_rate = sample_rate_2
-            logging.info(f"Resampling audio1 from {sample_rate_1}Hz to {sample_rate_2}Hz for merging.")
+            logging.info("Resampling audio1 from %sHz to %sHz for merging.", sample_rate_1, sample_rate_2)
     else:
         output_sample_rate = sample_rate_1
     return waveform_1, waveform_2, output_sample_rate
@@ -495,10 +495,10 @@ class AudioMerge(IO.ComfyNode):
         length_2 = waveform_2.shape[-1]

         if length_2 > length_1:
-            logging.info(f"AudioMerge: Trimming audio2 from {length_2} to {length_1} samples to match audio1 length.")
+            logging.info("AudioMerge: Trimming audio2 from %s to %s samples to match audio1 length.", length_2, length_1)
             waveform_2 = waveform_2[..., :length_1]
         elif length_2 < length_1:
-            logging.info(f"AudioMerge: Padding audio2 from {length_2} to {length_1} samples to match audio1 length.")
+            logging.info("AudioMerge: Padding audio2 from %s to %s samples to match audio1 length.", length_2, length_1)
             pad_shape = list(waveform_2.shape)
             pad_shape[-1] = length_1 - length_2
             pad_tensor = torch.zeros(pad_shape, dtype=waveform_2.dtype, device=waveform_2.device)
diff --git a/comfy_extras/nodes_dataset.py b/comfy_extras/nodes_dataset.py
index 5ef851bd0..874db3503 100644
--- a/comfy_extras/nodes_dataset.py
+++ b/comfy_extras/nodes_dataset.py
@@ -110,7 +110,7 @@ class LoadImageTextDataSetFromFolderNode(io.ComfyNode):
     @classmethod
     def execute(cls, folder):
-        logging.info(f"Loading images from folder: {folder}")
+        logging.info("Loading images from folder: %s", folder)

         sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder)
         valid_extensions = [".png", ".jpg", ".jpeg", ".webp"]
@@ -149,7 +149,7 @@
         output_tensor = load_and_process_images(image_files, sub_input_dir)

-        logging.info(f"Loaded {len(output_tensor)} images from {sub_input_dir}.")
+        logging.info("Loaded %s images from %s.", len(output_tensor), sub_input_dir)

         return io.NodeOutput(output_tensor, captions)
@@ -236,7 +236,7 @@ class SaveImageDataSetToFolderNode(io.ComfyNode):
         output_dir = os.path.join(folder_paths.get_output_directory(), folder_name)
         saved_files = save_images_to_folder(images, output_dir, filename_prefix)

-        logging.info(f"Saved {len(saved_files)} images to {output_dir}.")
+        logging.info("Saved %s images to %s.", len(saved_files), output_dir)

         return io.NodeOutput()
@@ -283,7 +283,7 @@ class SaveImageTextDataSetToFolderNode(io.ComfyNode):
             with open(caption_path, "w", encoding="utf-8") as f:
                 f.write(caption)

-        logging.info(f"Saved {len(saved_files)} images and captions to {output_dir}.")
+        logging.info("Saved %s images and captions to %s.", len(saved_files), output_dir)

         return io.NodeOutput()
@@ -1104,7 +1104,7 @@ class MergeImageListsNode(ImageProcessingNode):
         """Simply return the images list (already merged by input handling)."""
         # When multiple list inputs are connected, they're concatenated
         # For now, this is a simple pass-through
-        logging.info(f"Merged image list contains {len(images)} images")
+        logging.info("Merged image list contains %s images", len(images))
         return images
@@ -1121,7 +1121,7 @@ class MergeTextListsNode(TextProcessingNode):
         """Simply return the texts list (already merged by input handling)."""
         # When multiple list inputs are connected, they're concatenated
         # For now, this is a simple pass-through
-        logging.info(f"Merged text list contains {len(texts)} texts")
+        logging.info("Merged text list contains %s texts", len(texts))
         return texts
@@ -1217,7 +1217,7 @@ class ResolutionBucket(io.ComfyNode):
                 f"Resolution bucket ({h}x{w}): {len(bucket_data['latents'])} samples"
             )

-        logging.info(f"Created {len(buckets)} resolution buckets from {len(flat_latents)} samples")
+        logging.info("Created %s resolution buckets from %s samples", len(buckets), len(flat_latents))

         return io.NodeOutput(output_latents, output_conditions)
@@ -1283,7 +1283,7 @@ class MakeTrainingDataset(io.ComfyNode):
         )

         # Encode images with VAE
-        logging.info(f"Encoding {num_images} images with VAE...")
+        logging.info("Encoding %s images with VAE...", num_images)
         latents_list = []  # list[{"samples": tensor}]
         for img_tensor in images:
             # img_tensor is [1, H, W, 3]
@@ -1291,7 +1291,7 @@
             latents_list.append({"samples": latent_tensor})

         # Encode texts with CLIP
-        logging.info(f"Encoding {len(texts)} texts with CLIP...")
+        logging.info("Encoding %s texts with CLIP...", len(texts))
         conditioning_list = []  # list[list[cond]]
         for text in texts:
             if text == "":
@@ -1404,7 +1404,7 @@ class SaveTrainingDataset(io.ComfyNode):
         with open(metadata_path, "w") as f:
             json.dump(metadata, f, indent=2)

-        logging.info(f"Successfully saved {num_samples} samples to {output_dir}.")
+        logging.info("Successfully saved %s samples to %s.", num_samples, output_dir)

         return io.NodeOutput()
@@ -1459,7 +1459,7 @@ class LoadTrainingDataset(io.ComfyNode):
         if not shard_files:
             raise ValueError(f"No shard files found in {dataset_dir}")

-        logging.info(f"Loading {len(shard_files)} shards from {dataset_dir}...")
+        logging.info("Loading %s shards from %s...", len(shard_files), dataset_dir)

         # Load all shards
         all_latents = []  # list[{"samples": tensor}]
@@ -1474,7 +1474,7 @@
             all_latents.extend(shard_data["latents"])
             all_conditioning.extend(shard_data["conditioning"])

-            logging.info(f"Loaded {shard_file}: {len(shard_data['latents'])} samples")
+            logging.info("Loaded %s: %s samples", shard_file, len(shard_data['latents']))

         logging.info(
             f"Successfully loaded {len(all_latents)} samples from {dataset_dir}."
diff --git a/comfy_extras/nodes_easycache.py b/comfy_extras/nodes_easycache.py
index 11b23ffdb..220684a7f 100644
--- a/comfy_extras/nodes_easycache.py
+++ b/comfy_extras/nodes_easycache.py
@@ -32,7 +32,7 @@ def easycache_forward_wrapper(executor, *args, **kwargs):
     # if first cond marked this step for skipping, skip it and use appropriate cached values
     if easycache.skip_current_step:
         if easycache.verbose:
-            logging.info(f"EasyCache [verbose] - was marked to skip this step by {easycache.first_cond_uuid}. Present uuids: {uuids}")
+            logging.info("EasyCache [verbose] - was marked to skip this step by %s. Present uuids: %s", easycache.first_cond_uuid, uuids)
         return easycache.apply_cache_diff(x, uuids)
     if easycache.initial_step:
         easycache.first_cond_uuid = uuids[0]
@@ -46,13 +46,13 @@
             easycache.cumulative_change_rate += approx_output_change_rate
             if easycache.cumulative_change_rate < easycache.reuse_threshold:
                 if easycache.verbose:
-                    logging.info(f"EasyCache [verbose] - skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}")
+                    logging.info("EasyCache [verbose] - skipping step; cumulative_change_rate: %s, reuse_threshold: %s", easycache.cumulative_change_rate, easycache.reuse_threshold)
                 # other conds should also skip this step, and instead use their cached values
                 easycache.skip_current_step = True
                 return easycache.apply_cache_diff(x, uuids)
             else:
                 if easycache.verbose:
-                    logging.info(f"EasyCache [verbose] - NOT skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}")
+                    logging.info("EasyCache [verbose] - NOT skipping step; cumulative_change_rate: %s, reuse_threshold: %s", easycache.cumulative_change_rate, easycache.reuse_threshold)
                 easycache.cumulative_change_rate = 0.0

     output: torch.Tensor = executor(*args, **kwargs)
@@ -65,11 +65,11 @@
             approx_output_change_rate = (easycache.relative_transformation_rate * input_change) / easycache.output_prev_norm
             easycache.approx_output_change_rates.append(approx_output_change_rate.item())
             if easycache.verbose:
-                logging.info(f"EasyCache [verbose] - approx_output_change_rate: {approx_output_change_rate}")
+                logging.info("EasyCache [verbose] - approx_output_change_rate: %s", approx_output_change_rate)
         if input_change is not None:
             easycache.relative_transformation_rate = output_change / input_change
         if easycache.verbose:
-            logging.info(f"EasyCache [verbose] - output_change_rate: {output_change_rate}")
+            logging.info("EasyCache [verbose] - output_change_rate: %s", output_change_rate)
     # TODO: allow cache_diff to be offloaded
     easycache.update_cache_diff(output, next_x_prev, uuids)
     if has_first_cond_uuid:
@@ -77,7 +77,7 @@
         easycache.output_prev_subsampled = easycache.subsample(output, uuids)
         easycache.output_prev_norm = output.flatten().abs().mean()
         if easycache.verbose:
-            logging.info(f"EasyCache [verbose] - x_prev_subsampled: {easycache.x_prev_subsampled.shape}")
+            logging.info("EasyCache [verbose] - x_prev_subsampled: %s", easycache.x_prev_subsampled.shape)
     return output

 def lazycache_predict_noise_wrapper(executor, *args, **kwargs):
@@ -102,13 +102,13 @@
         easycache.cumulative_change_rate += approx_output_change_rate
         if easycache.cumulative_change_rate < easycache.reuse_threshold:
             if easycache.verbose:
-                logging.info(f"LazyCache [verbose] - skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}")
+                logging.info("LazyCache [verbose] - skipping step; cumulative_change_rate: %s, reuse_threshold: %s", easycache.cumulative_change_rate, easycache.reuse_threshold)
             # other conds should also skip this step, and instead use their cached values
             easycache.skip_current_step = True
             return easycache.apply_cache_diff(x)
         else:
             if easycache.verbose:
-                logging.info(f"LazyCache [verbose] - NOT skipping step; cumulative_change_rate: {easycache.cumulative_change_rate}, reuse_threshold: {easycache.reuse_threshold}")
+                logging.info("LazyCache [verbose] - NOT skipping step; cumulative_change_rate: %s, reuse_threshold: %s", easycache.cumulative_change_rate, easycache.reuse_threshold)
             easycache.cumulative_change_rate = 0.0
     output: torch.Tensor = executor(*args, **kwargs)
     if easycache.has_output_prev_norm():
@@ -120,18 +120,18 @@
         approx_output_change_rate = (easycache.relative_transformation_rate * input_change) / easycache.output_prev_norm
         easycache.approx_output_change_rates.append(approx_output_change_rate.item())
         if easycache.verbose:
-            logging.info(f"LazyCache [verbose] - approx_output_change_rate: {approx_output_change_rate}")
+            logging.info("LazyCache [verbose] - approx_output_change_rate: %s", approx_output_change_rate)
         if input_change is not None:
             easycache.relative_transformation_rate = output_change / input_change
         if easycache.verbose:
-            logging.info(f"LazyCache [verbose] - output_change_rate: {output_change_rate}")
+            logging.info("LazyCache [verbose] - output_change_rate: %s", output_change_rate)
     # TODO: allow cache_diff to be offloaded
     easycache.update_cache_diff(output, next_x_prev)
     easycache.x_prev_subsampled = easycache.subsample(next_x_prev)
     easycache.output_prev_subsampled = easycache.subsample(output)
     easycache.output_prev_norm = output.flatten().abs().mean()
     if easycache.verbose:
-        logging.info(f"LazyCache [verbose] - x_prev_subsampled: {easycache.x_prev_subsampled.shape}")
+        logging.info("LazyCache [verbose] - x_prev_subsampled: %s", easycache.x_prev_subsampled.shape)
     return output

 def easycache_calc_cond_batch_wrapper(executor, *args, **kwargs):
@@ -152,22 +152,22 @@
         # clone and prepare timesteps
         guider.model_options["transformer_options"]["easycache"] = guider.model_options["transformer_options"]["easycache"].clone().prepare_timesteps(guider.model_patcher.model.model_sampling)
         easycache: Union[EasyCacheHolder, LazyCacheHolder] = guider.model_options['transformer_options']['easycache']
-        logging.info(f"{easycache.name} enabled - threshold: {easycache.reuse_threshold}, start_percent: {easycache.start_percent}, end_percent: {easycache.end_percent}")
+        logging.info("%s enabled - threshold: %s, start_percent: %s, end_percent: %s", easycache.name, easycache.reuse_threshold, easycache.start_percent, easycache.end_percent)
         return executor(*args, **kwargs)
     finally:
         easycache = guider.model_options['transformer_options']['easycache']
         output_change_rates = easycache.output_change_rates
         approx_output_change_rates = easycache.approx_output_change_rates
         if easycache.verbose:
-            logging.info(f"{easycache.name} [verbose] - output_change_rates {len(output_change_rates)}: {output_change_rates}")
-            logging.info(f"{easycache.name} [verbose] - approx_output_change_rates {len(approx_output_change_rates)}: {approx_output_change_rates}")
+            logging.info("%s [verbose] - output_change_rates %s: %s", easycache.name, len(output_change_rates), output_change_rates)
+            logging.info("%s [verbose] - approx_output_change_rates %s: %s", easycache.name, len(approx_output_change_rates), approx_output_change_rates)
         total_steps = len(args[3])-1
         # catch division by zero for log statement; sucks to crash after all sampling is done
         try:
             speedup = total_steps/(total_steps-easycache.total_steps_skipped)
         except ZeroDivisionError:
             speedup = 1.0
-        logging.info(f"{easycache.name} - skipped {easycache.total_steps_skipped}/{total_steps} steps ({speedup:.2f}x speedup).")
+        logging.info("%s - skipped %s/%s steps (%.2fx speedup).", easycache.name, easycache.total_steps_skipped, total_steps, speedup)
         easycache.reset()
         guider.model_options = orig_model_options
diff --git a/comfy_extras/nodes_hooks.py b/comfy_extras/nodes_hooks.py
index 1edc06f3d..e69dcde51 100644
--- a/comfy_extras/nodes_hooks.py
+++ b/comfy_extras/nodes_hooks.py
@@ -540,7 +540,7 @@ class CreateHookKeyframesInterpolated:
             is_first = False
             prev_hook_kf.add(comfy.hooks.HookKeyframe(strength=strength, start_percent=percent, guarantee_steps=guarantee_steps))
             if print_keyframes:
-                logging.info(f"Hook Keyframe - start_percent:{percent} = {strength}")
+                logging.info("Hook Keyframe - start_percent:%s = %s", percent, strength)
         return (prev_hook_kf,)

 class CreateHookKeyframesFromFloats:
@@ -589,7 +589,7 @@ class CreateHookKeyframesFromFloats:
             is_first = False
             prev_hook_kf.add(comfy.hooks.HookKeyframe(strength=strength, start_percent=percent, guarantee_steps=guarantee_steps))
             if print_keyframes:
-                logging.info(f"Hook Keyframe - start_percent:{percent} = {strength}")
+                logging.info("Hook Keyframe - start_percent:%s = %s", percent, strength)
         return (prev_hook_kf,)
 #------------------------------------------
 ###########################################
diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py
index 9ba1c4ba8..f221ff53d 100644
--- a/comfy_extras/nodes_latent.py
+++ b/comfy_extras/nodes_latent.py
@@ -456,10 +456,10 @@ class ReplaceVideoLatentFrames(io.ComfyNode):
         if index < 0:
             index = dest_frames + index
         if index > dest_frames:
-            logging.warning(f"ReplaceVideoLatentFrames: Index {index} is out of bounds for destination latent frames {dest_frames}.")
+            logging.warning("ReplaceVideoLatentFrames: Index %s is out of bounds for destination latent frames %s.", index, dest_frames)
             return io.NodeOutput(destination)
         if index + source_frames > dest_frames:
-            logging.warning(f"ReplaceVideoLatentFrames: Source latent frames {source_frames} do not fit within destination latent frames {dest_frames} at the specified index {index}.")
+            logging.warning("ReplaceVideoLatentFrames: Source latent frames %s do not fit within destination latent frames %s at the specified index %s.", source_frames, dest_frames, index)
             return io.NodeOutput(destination)
         s = source.copy()
         s_source = source["samples"]
diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py
index 364804205..eb217dc8d 100644
--- a/comfy_extras/nodes_train.py
+++ b/comfy_extras/nodes_train.py
@@ -390,7 +390,7 @@ def find_all_highest_child_module_with_forward(
         model, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict)
     ):
         result.append(model)
-        logging.debug(f"Found module with forward: {name} ({model.__class__.__name__})")
+        logging.debug("Found module with forward: %s (%s)", name, model.__class__.__name__)
         return result
     name = name or "root"
     for next_name, child in model.named_children():
@@ -498,9 +498,9 @@ def _prepare_latents_and_count(latents, dtype, bucket_mode):
         num_images = sum(t.shape[0] for t in latents)
         multi_res = False  # Not using multi_res path in bucket mode
-        logging.info(f"Bucket mode: {num_buckets} buckets, {num_images} total samples")
+        logging.info("Bucket mode: %s buckets, %s total samples", num_buckets, num_images)
         for i, lat in enumerate(latents):
-            logging.info(f" Bucket {i}: shape {lat.shape}")
+            logging.info(" Bucket %s: shape %s", i, lat.shape)
         return latents, num_images, multi_res

     # Non-bucket mode
@@ -509,7 +509,7 @@ def _prepare_latents_and_count(latents, dtype, bucket_mode):
         latents = [t.to(dtype) for t in latents]
         for latent in latents:
             all_shapes.add(latent.shape)
-        logging.info(f"Latent shapes: {all_shapes}")
+        logging.info("Latent shapes: %s", all_shapes)
         if len(all_shapes) > 1:
             multi_res = True
         else:
@@ -521,7 +521,7 @@
         num_images = latents.shape[0]
         multi_res = False
     else:
-        logging.error(f"Invalid latents type: {type(latents)}")
+        logging.error("Invalid latents type: %s", type(latents))
         num_images = 0
         multi_res = False
@@ -545,7 +545,7 @@
     if bucket_mode:
         return positive  # Skip validation in bucket mode

-    logging.info(f"Total Images: {num_images}, Total Captions: {len(positive)}")
+    logging.info("Total Images: %s, Total Captions: %s", num_images, len(positive))
     if len(positive) == 1 and num_images > 1:
         return positive * num_images
     elif len(positive) != num_images:
diff --git a/execution.py b/execution.py
index 648f204ec..da05b4a18 100644
--- a/execution.py
+++ b/execution.py
@@ -595,7 +595,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
         for name, inputs in input_data_all.items():
             input_data_formatted[name] = [format_value(x) for x in inputs]

-        logging.error(f"!!! Exception during processing !!! {ex}")
+        logging.error("!!! Exception during processing !!! %s", ex)
         logging.error(traceback.format_exc())

         tips = ""
@@ -1061,11 +1061,11 @@ async def validate_prompt(prompt_id, prompt, partial_execution_list: Union[list[
         if valid is True:
             good_outputs.add(o)
         else:
-            logging.error(f"Failed to validate prompt for output {o}:")
+            logging.error("Failed to validate prompt for output %s:", o)
             if len(reasons) > 0:
                 logging.error("* (prompt):")
                 for reason in reasons:
-                    logging.error(f" - {reason['message']}: {reason['details']}")
+                    logging.error(" - %s: %s", reason['message'], reason['details'])
             errors += [(o, reasons)]
         for node_id, result in validated.items():
             valid = result[0]
@@ -1081,9 +1081,9 @@ async def validate_prompt(prompt_id, prompt, partial_execution_list: Union[list[
                     "dependent_outputs": [],
                     "class_type": class_type
                 }
-                logging.error(f"* {class_type} {node_id}:")
+                logging.error("* %s %s:", class_type, node_id)
                 for reason in reasons:
-                    logging.error(f" - {reason['message']}: {reason['details']}")
+                    logging.error(" - %s: %s", reason['message'], reason['details'])
                 node_errors[node_id]["dependent_outputs"].append(o)
             logging.error("Output will be ignored")
diff --git a/folder_paths.py b/folder_paths.py
index 9c96540e3..3abd4ce56 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -314,7 +314,7 @@ def recursive_search(directory: str, excluded_dir_names: list[str] | None=None)
     try:
         dirs[directory] = os.path.getmtime(directory)
     except FileNotFoundError:
-        logging.warning(f"Warning: Unable to access {directory}. Skipping this path.")
+        logging.warning("Warning: Unable to access %s. Skipping this path.", directory)

     logging.debug("recursive file list on directory {}".format(directory))
     dirpath: str
@@ -328,7 +328,7 @@
                 relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
                 result.append(relative_path)
             except:
-                logging.warning(f"Warning: Unable to access {file_name}. Skipping this file.")
+                logging.warning("Warning: Unable to access %s. Skipping this file.", file_name)
                 continue

         for d in subdirs:
@@ -336,7 +336,7 @@
             try:
                 dirs[path] = os.path.getmtime(path)
             except FileNotFoundError:
-                logging.warning(f"Warning: Unable to access {path}. Skipping this path.")
+                logging.warning("Warning: Unable to access %s. Skipping this path.", path)
                 continue

     logging.debug("found {} files".format(len(result)))
     return result, dirs
diff --git a/main.py b/main.py
index 0e07a95da..4643dba5d 100644
--- a/main.py
+++ b/main.py
@@ -58,7 +58,12 @@ if __name__ == "__main__":

 def handle_comfyui_manager_unavailable():
     if not args.windows_standalone_build:
-        logging.warning(f"\n\nYou appear to be running comfyui-manager from source, this is not recommended. Please install comfyui-manager using the following command:\ncommand:\n\t{sys.executable} -m pip install --pre comfyui_manager\n")
+        logging.warning("""
+
+You appear to be running comfyui-manager from source, this is not recommended. Please install comfyui-manager using the following command:
+command:
+	%s -m pip install --pre comfyui_manager
+""", sys.executable)
         args.enable_manager = False
@@ -85,7 +90,7 @@ def apply_custom_paths():
     # --output-directory, --input-directory, --user-directory
     if args.output_directory:
         output_dir = os.path.abspath(args.output_directory)
-        logging.info(f"Setting output directory to: {output_dir}")
+        logging.info("Setting output directory to: %s", output_dir)
         folder_paths.set_output_directory(output_dir)

     # These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. nodes
@@ -98,12 +103,12 @@
     if args.input_directory:
         input_dir = os.path.abspath(args.input_directory)
-        logging.info(f"Setting input directory to: {input_dir}")
+        logging.info("Setting input directory to: %s", input_dir)
         folder_paths.set_input_directory(input_dir)

     if args.user_directory:
         user_dir = os.path.abspath(args.user_directory)
-        logging.info(f"Setting user directory to: {user_dir}")
+        logging.info("Setting user directory to: %s", user_dir)
         folder_paths.set_user_directory(user_dir)
@@ -119,7 +124,7 @@ def execute_prestartup_script():
             spec.loader.exec_module(module)
             return True
         except Exception as e:
-            logging.error(f"Failed to execute startup-script: {script_path} / {e}")
+            logging.error("Failed to execute startup-script: %s / %s", script_path, e)
         return False

     node_paths = folder_paths.get_folder_paths("custom_nodes")
@@ -140,7 +145,7 @@
             script_path = os.path.join(module_path, "prestartup_script.py")
             if os.path.exists(script_path):
                 if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes:
-                    logging.info(f"Prestartup Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes")
+                    logging.info("Prestartup Skipping %s due to disable_all_custom_nodes and whitelist_custom_nodes", possible_module)
                     continue
                 time_before = time.perf_counter()
                 success = execute_script(script_path)
@@ -246,7 +251,7 @@ def prompt_worker(q, server_instance):
                 # Log Time in a more readable way after 10 minutes
                 if execution_time > 600:
                     execution_time = time.strftime("%H:%M:%S", time.gmtime(execution_time))
-                    logging.info(f"Prompt executed in {execution_time}")
+                    logging.info("Prompt executed in %s", execution_time)
                 else:
                     logging.info("Prompt executed in {:.2f} seconds".format(execution_time))
@@ -325,7 +330,7 @@ def setup_database():
         if dependencies_available():
             init_db()
     except Exception as e:
-        logging.error(f"Failed to initialize database. Please ensure you have installed the latest requirements. If the error persists, please report this as in future the database will be required: {e}")
+        logging.error("Failed to initialize database. Please ensure you have installed the latest requirements. If the error persists, please report this as in future the database will be required: %s", e)


 def start_comfyui(asyncio_loop=None):
@@ -335,7 +340,7 @@
     """
     if args.temp_directory:
         temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
-        logging.info(f"Setting temp directory to: {temp_dir}")
+        logging.info("Setting temp directory to: %s", temp_dir)
         folder_paths.set_temp_directory(temp_dir)
     cleanup_temp()
diff --git a/nodes.py b/nodes.py
index 56b74ebe3..31e3b8b61 100644
--- a/nodes.py
+++ b/nodes.py
@@ -2173,7 +2173,7 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom
                     logging.info("Automatically register web folder {} for {}".format(web_dir_name, project_name))
         except Exception as e:
-            logging.warning(f"Unable to parse pyproject.toml due to lack dependency pydantic-settings, please run 'pip install -r requirements.txt': {e}")
+            logging.warning("Unable to parse pyproject.toml due to lack dependency pydantic-settings, please run 'pip install -r requirements.txt': %s", e)

         if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None:
             web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY")))
@@ -2193,7 +2193,7 @@
         elif hasattr(module, "comfy_entrypoint"):
             entrypoint = getattr(module, "comfy_entrypoint")
             if not callable(entrypoint):
-                logging.warning(f"comfy_entrypoint in {module_path} is not callable, skipping.")
+                logging.warning("comfy_entrypoint in %s is not callable, skipping.", module_path)
                 return False
             try:
                 if inspect.iscoroutinefunction(entrypoint):
@@ -2201,11 +2201,11 @@
                 else:
                     extension = entrypoint()
                 if not isinstance(extension, ComfyExtension):
-                    logging.warning(f"comfy_entrypoint in {module_path} did not return a ComfyExtension, skipping.")
+                    logging.warning("comfy_entrypoint in %s did not return a ComfyExtension, skipping.", module_path)
                     return False
                 node_list = await extension.get_node_list()
                 if not isinstance(node_list, list):
-                    logging.warning(f"comfy_entrypoint in {module_path} did not return a list of nodes, skipping.")
+                    logging.warning("comfy_entrypoint in %s did not return a list of nodes, skipping.", module_path)
                     return False
                 for node_cls in node_list:
                     node_cls: io.ComfyNode
@@ -2217,14 +2217,14 @@
                         NODE_DISPLAY_NAME_MAPPINGS[schema.node_id] = schema.display_name
                 return True
             except Exception as e:
-                logging.warning(f"Error while calling comfy_entrypoint in {module_path}: {e}")
+                logging.warning("Error while calling comfy_entrypoint in %s: %s", module_path, e)
                 return False
         else:
-            logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or NODES_LIST (need one).")
+            logging.warning("Skip %s module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or NODES_LIST (need one).", module_path)
             return False
     except Exception as e:
         logging.warning(traceback.format_exc())
-        logging.warning(f"Cannot import {module_path} module for custom nodes: {e}")
+        logging.warning("Cannot import %s module for custom nodes: %s", module_path, e)
         return False

 async def init_external_custom_nodes():
@@ -2252,12 +2252,12 @@ async def init_external_custom_nodes():
             if module_path.endswith(".disabled"):
                 continue
             if args.disable_all_custom_nodes and possible_module not in args.whitelist_custom_nodes:
-                logging.info(f"Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes")
+                logging.info("Skipping %s due to disable_all_custom_nodes and whitelist_custom_nodes", possible_module)
                 continue

             if args.enable_manager:
                 if comfyui_manager.should_be_disabled(module_path):
-                    logging.info(f"Blocked by policy: {module_path}")
+                    logging.info("Blocked by policy: %s", module_path)
                     continue

             time_before = time.perf_counter()
diff --git a/server.py b/server.py
index 70c8b5e3b..d7458d2c7 100644
--- a/server.py
+++ b/server.py
@@ -234,7 +234,7 @@ class PromptServer():
             if args.front_end_root is None
             else args.front_end_root
         )
-        logging.info(f"[Prompt Server] web root: {self.web_root}")
+        logging.info("[Prompt Server] web root: %s", self.web_root)
         routes = web.RouteTableDef()
         self.routes = routes
         self.last_node_id = None
@@ -296,7 +296,7 @@
                         f"Invalid JSON received from client {sid}: {msg.data}"
                     )
                 except Exception as e:
-                    logging.error(f"Error processing WebSocket message: {e}")
+                    logging.error("Error processing WebSocket message: %s", e)
         finally:
             self.sockets.pop(sid, None)
             self.sockets_metadata.pop(sid, None)
@@ -689,7 +689,7 @@
                 try:
                     out[x] = node_info(x)
                 except Exception:
-                    logging.error(f"[ERROR] An error occurred while retrieving information for the '{x}' node.")
+                    logging.error("[ERROR] An error occurred while retrieving information for the '%s' node.", x)
                     logging.error(traceback.format_exc())
             return web.json_response(out)
@@ -935,14 +935,14 @@
                 for item in currently_running:
                     # item structure: (number, prompt_id, prompt, extra_data, outputs_to_execute)
                     if item[1] == prompt_id:
-                        logging.info(f"Interrupting prompt {prompt_id}")
+                        logging.info("Interrupting prompt %s", prompt_id)
                         should_interrupt = True
                         break

                 if should_interrupt:
                     nodes.interrupt_processing()
                 else:
-                    logging.info(f"Prompt {prompt_id} is not currently running, skipping interrupt")
+                    logging.info("Prompt %s is not currently running, skipping interrupt", prompt_id)
             else:
                 # No prompt_id provided, do a global interrupt
                 logging.info("Global interrupt (no prompt_id specified)")