diff --git a/app/frontend_management.py b/app/frontend_management.py index 6ac59ca15..3db184bfe 100644 --- a/app/frontend_management.py +++ b/app/frontend_management.py @@ -87,7 +87,7 @@ ________________________________________________________________________ """.strip() ) else: - logging.info("ComfyUI frontend version: {}".format(frontend_version_str)) + logging.info("ComfyUI frontend version: %s", frontend_version_str) except Exception as e: logging.error("Failed to check frontend version: %s", e) @@ -390,12 +390,12 @@ comfyui-workflow-templates is not installed. ) if os.path.exists(expected_path): logging.info( - f"Using existing copy of specific frontend version tag: {repo_owner}/{repo_name}@{version}" + "Using existing copy of specific frontend version tag: %s/%s@%s", repo_owner, repo_name, version ) return expected_path logging.info( - f"Initializing frontend: {repo_owner}/{repo_name}@{version}, requesting version details from GitHub..." + "Initializing frontend: %s/%s@%s, requesting version details from GitHub...", repo_owner, repo_name, version ) provider = provider or FrontEndProvider(repo_owner, repo_name) diff --git a/comfy/audio_encoders/audio_encoders.py b/comfy/audio_encoders/audio_encoders.py index 46ef21c95..cc112b181 100644 --- a/comfy/audio_encoders/audio_encoders.py +++ b/comfy/audio_encoders/audio_encoders.py @@ -84,8 +84,8 @@ def load_audio_encoder_from_sd(sd, prefix=""): audio_encoder = AudioEncoderModel(config) m, u = audio_encoder.load_sd(sd) if len(m) > 0: - logging.warning("missing audio encoder: {}".format(m)) + logging.warning("missing audio encoder: %s", m) if len(u) > 0: - logging.warning("unexpected audio encoder: {}".format(u)) + logging.warning("unexpected audio encoder: %s", u) return audio_encoder diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index d5fc53497..d3ebf9d69 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -130,7 +130,7 @@ def load_clipvision_from_sd(sd, prefix="", convert_keys=False): clip = ClipVisionModel(json_config) m, u = clip.load_sd(sd) if len(m) > 0: - logging.warning("missing clip vision: {}".format(m)) + logging.warning("missing clip vision: %s", m) u = set(u) keys = list(sd.keys()) for k in keys: diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 0b5e30f52..18b64461b 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -442,10 +442,10 @@ def controlnet_load_state_dict(control_model, sd): missing, unexpected = control_model.load_state_dict(sd, strict=False) if len(missing) > 0: - logging.warning("missing controlnet keys: {}".format(missing)) + logging.warning("missing controlnet keys: %s", missing) if len(unexpected) > 0: - logging.debug("unexpected controlnet keys: {}".format(unexpected)) + logging.debug("unexpected controlnet keys: %s", unexpected) return control_model @@ -668,7 +668,7 @@ def load_controlnet_state_dict(state_dict, model=None, model_options={}): leftover_keys = controlnet_data.keys() if len(leftover_keys) > 0: - logging.warning("leftover keys: {}".format(leftover_keys)) + logging.warning("leftover keys: %s", leftover_keys) controlnet_data = new_sd elif "controlnet_blocks.0.weight" in controlnet_data: if "double_blocks.0.img_attn.norm.key_norm.scale" in controlnet_data: @@ -753,10 +753,10 @@ def load_controlnet_state_dict(state_dict, model=None, model_options={}): missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False) if len(missing) > 0: - logging.warning("missing controlnet keys: {}".format(missing)) + logging.warning("missing controlnet keys: 
%s", missing) if len(unexpected) > 0: - logging.debug("unexpected controlnet keys: {}".format(unexpected)) + logging.debug("unexpected controlnet keys: %s", unexpected) global_average_pooling = model_options.get("global_average_pooling", False) control = ControlNet(control_model, global_average_pooling=global_average_pooling, load_device=load_device, manual_cast_dtype=manual_cast_dtype) @@ -771,7 +771,7 @@ def load_controlnet(ckpt_path, model=None, model_options={}): cnet = load_controlnet_state_dict(comfy.utils.load_torch_file(ckpt_path, safe_load=True), model=model, model_options=model_options) if cnet is None: - logging.error("error checkpoint does not contain controlnet or t2i adapter data {}".format(ckpt_path)) + logging.error("error checkpoint does not contain controlnet or t2i adapter data %s", ckpt_path) return cnet class T2IAdapter(ControlBase): @@ -876,9 +876,9 @@ def load_t2i_adapter(t2i_data, model_options={}): #TODO: model_options missing, unexpected = model_ad.load_state_dict(t2i_data) if len(missing) > 0: - logging.warning("t2i missing {}".format(missing)) + logging.warning("t2i missing", missing) if len(unexpected) > 0: - logging.debug("t2i unexpected {}".format(unexpected)) + logging.debug("t2i unexpected", unexpected) return T2IAdapter(model_ad, model_ad.input_channels, compression_ratio, upscale_algorithm) diff --git a/comfy/ldm/cosmos/blocks.py b/comfy/ldm/cosmos/blocks.py index afb43d469..7af80bb73 100644 --- a/comfy/ldm/cosmos/blocks.py +++ b/comfy/ldm/cosmos/blocks.py @@ -295,7 +295,7 @@ class TimestepEmbedding(nn.Module): def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False, weight_args={}, operations=None): super().__init__() logging.debug( - f"Using AdaLN LoRA Flag: {use_adaln_lora}. We enable bias if no AdaLN LoRA for backward compatibility." + "Using AdaLN LoRA Flag: %s. We enable bias if no AdaLN LoRA for backward compatibility.", use_adaln_lora ) self.linear_1 = operations.Linear(in_features, out_features, bias=not use_adaln_lora, **weight_args) self.activation = nn.SiLU() diff --git a/comfy/ldm/cosmos/cosmos_tokenizer/layers3d.py b/comfy/ldm/cosmos/cosmos_tokenizer/layers3d.py index 9a3ebed6a..de95cbf45 100644 --- a/comfy/ldm/cosmos/cosmos_tokenizer/layers3d.py +++ b/comfy/ldm/cosmos/cosmos_tokenizer/layers3d.py @@ -632,9 +632,7 @@ class DecoderBase(nn.Module): curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1) self.z_shape = (1, z_channels, curr_res, curr_res) logging.debug( - "Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape) - ) + "Working with z of shape %s = %d dimensions.", self.z_shape, np.prod(self.z_shape) ) # z to block_in @@ -929,9 +927,7 @@ class DecoderFactorized(nn.Module): curr_res = (resolution // patch_size) // 2 ** (self.num_resolutions - 1) self.z_shape = (1, z_channels, curr_res, curr_res) logging.debug( - "Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape) - ) + "Working with z of shape %s = %d dimensions.", self.z_shape, np.prod(self.z_shape) ) # z to block_in diff --git a/comfy/ldm/cosmos/predict2.py b/comfy/ldm/cosmos/predict2.py index 931dcb52a..f8a15a51c 100644 --- a/comfy/ldm/cosmos/predict2.py +++ b/comfy/ldm/cosmos/predict2.py @@ -118,13 +118,20 @@ class Attention(nn.Module): operations=None, ) -> None: super().__init__() + + context_dim = query_dim if context_dim is None else context_dim + logging.debug( - f"Setting up {self.__class__.__name__}. 
Query dim is {query_dim}, context_dim is {context_dim} and using " - f"{n_heads} heads with a dimension of {head_dim}." + "Setting up %s. Query dim is %d, context_dim is %d and using " + "%d heads with a dimension of %d.", + self.__class__.__name__, + query_dim, + context_dim, + n_heads, + head_dim, ) - self.is_selfattn = context_dim is None # self attention - context_dim = query_dim if context_dim is None else context_dim inner_dim = head_dim * n_heads self.n_heads = n_heads @@ -226,7 +233,7 @@ class TimestepEmbedding(nn.Module): def __init__(self, in_features: int, out_features: int, use_adaln_lora: bool = False, device=None, dtype=None, operations=None): super().__init__() logging.debug( - f"Using AdaLN LoRA Flag: {use_adaln_lora}. We enable bias if no AdaLN LoRA for backward compatibility." + "Using AdaLN LoRA Flag: %s. We enable bias if no AdaLN LoRA for backward compatibility.", use_adaln_lora ) self.in_dim = in_features self.out_dim = out_features diff --git a/comfy/ldm/cosmos/vae.py b/comfy/ldm/cosmos/vae.py index 15457e26a..3e3a555b3 100644 --- a/comfy/ldm/cosmos/vae.py +++ b/comfy/ldm/cosmos/vae.py @@ -92,7 +92,7 @@ class CausalContinuousVideoTokenizer(nn.Module): num_parameters = sum(param.numel() for param in self.parameters()) logging.debug("model=%s, num_parameters=%d", self.name, num_parameters) logging.debug( - f"z_channels={z_channels}, latent_channels={self.latent_channels}." + "z_channels=%d, latent_channels=%d.", z_channels, self.latent_channels ) latent_temporal_chunk = 16 diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index e355e47f7..eedbda0fa 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -390,7 +390,7 @@ def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape steps *= 2 if steps > 64: raise e - logging.warning("out of memory error, increasing steps and trying again {}".format(steps)) + logging.warning("out of memory error, increasing steps and trying again %s", steps) else: raise e @@ -556,7 +556,7 @@ def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape= try: out = sageattn(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout) except Exception as e: - logging.error("Error running sage attention: {}, using pytorch attention instead.".format(e)) + logging.error("Error running sage attention: %s, using pytorch attention instead.", e) exception_fallback = True if exception_fallback: if tensor_layout == "NHD": diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 1ae3ef034..e4ce43d64 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -272,7 +272,7 @@ def slice_attention(q, k, v): steps *= 2 if steps > 128: raise e - logging.warning("out of memory error, increasing steps and trying again {}".format(steps)) + logging.warning("out of memory error, increasing steps and trying again %s", steps) return r1 @@ -725,8 +725,7 @@ class Decoder(nn.Module): block_in = ch*ch_mult[self.num_resolutions-1] curr_res = resolution // 2**(self.num_resolutions-1) self.z_shape = (1,z_channels,curr_res,curr_res) - logging.debug("Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape))) + logging.debug("Working with z of shape %s = %d dimensions.", self.z_shape, np.prod(self.z_shape)) # z to block_in self.conv_in = conv_op(z_channels, diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py 
b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 4c8d53cac..6af33b942 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -369,7 +369,7 @@ def apply_control(h, control, name): try: h += ctrl except: - logging.warning("warning control could not be applied {} {}".format(h.shape, ctrl.shape)) + logging.warning("warning control could not be applied %s %s", h.shape, ctrl.shape) return h class UNetModel(nn.Module): diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py index d45dbbd38..90ac06e97 100644 --- a/comfy/ldm/modules/diffusionmodules/util.py +++ b/comfy/ldm/modules/diffusionmodules/util.py @@ -144,8 +144,7 @@ def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) if verbose: logging.info("Selected alphas for ddim sampler: a_t: %s; a_(t-1): %s", alphas, alphas_prev) - logging.info(f'For the chosen value of eta, which is {eta}, ' - f'this results in the following sigma_t schedule for ddim sampler {sigmas}') + logging.info("For the chosen value of eta, which is %s, this results in the following sigma_t schedule for ddim sampler %s", eta, sigmas) return sigmas, alphas, alphas_prev diff --git a/comfy/lora.py b/comfy/lora.py index 2ed0acb9d..65bc342d4 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -90,7 +90,7 @@ def load_lora(lora, to_load, log_missing=True): if log_missing: for x in lora.keys(): if x not in loaded_keys: - logging.warning("lora key not loaded: {}".format(x)) + logging.warning("lora key not loaded: %s", x) return patch_dict @@ -390,7 +390,7 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, ori if isinstance(v, weight_adapter.WeightAdapterBase): output = v.calculate_weight(weight, key, strength, strength_model, offset, function, intermediate_dtype, original_weights) if output is None: - logging.warning("Calculate Weight Failed: {} {}".format(v.name, key)) + logging.warning("Calculate Weight Failed: %s %s", v.name, key) else: weight = output if old_weight is not None: @@ -408,12 +408,12 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, ori # An extra flag to pad the weight if the diff's shape is larger than the weight do_pad_weight = len(v) > 1 and v[1]['pad_weight'] if do_pad_weight and diff.shape != weight.shape: - logging.info("Pad weight {} from {} to shape: {}".format(key, weight.shape, diff.shape)) + logging.info("Pad weight %s from %s to shape: %s", key, weight.shape, diff.shape) weight = pad_tensor_to_shape(weight, diff.shape) if strength != 0.0: if diff.shape != weight.shape: - logging.warning("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, diff.shape, weight.shape)) + logging.warning("WARNING SHAPE MISMATCH %s WEIGHT NOT MERGED %s != %s", key, diff.shape, weight.shape) else: weight += function(strength * comfy.model_management.cast_to_device(diff, weight.device, weight.dtype)) elif patch_type == "set": @@ -424,7 +424,7 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32, ori comfy.model_management.cast_to_device(original_weights[key][0][0], weight.device, intermediate_dtype) weight += function(strength * comfy.model_management.cast_to_device(diff_weight, weight.device, weight.dtype)) else: - logging.warning("patch type not recognized {} {}".format(patch_type, key)) + logging.warning("patch type not recognized %s %s", patch_type, 
key) if old_weight is not None: weight = old_weight diff --git a/comfy/model_base.py b/comfy/model_base.py index 49efd700b..409a1a1f5 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -144,7 +144,7 @@ class BaseModel(torch.nn.Module): if comfy.model_management.force_channels_last(): self.diffusion_model.to(memory_format=torch.channels_last) logging.debug("using channels last mode for diffusion model") - logging.info("model weight dtype {}, manual cast: {}".format(self.get_dtype(), self.manual_cast_dtype)) + logging.info("model weight dtype %s, manual cast: %s", self.get_dtype(), self.manual_cast_dtype) self.model_type = model_type self.model_sampling = model_sampling(model_config, model_type) @@ -153,8 +153,8 @@ class BaseModel(torch.nn.Module): self.adm_channels = 0 self.concat_keys = () - logging.info("model_type {}".format(model_type.name)) - logging.debug("adm {}".format(self.adm_channels)) + logging.info("model_type %s", model_type.name) + logging.debug("adm %s", self.adm_channels) self.memory_usage_factor = model_config.memory_usage_factor self.memory_usage_factor_conds = () self.memory_usage_shape_process = {} @@ -308,10 +308,10 @@ class BaseModel(torch.nn.Module): to_load = self.model_config.process_unet_state_dict(to_load) m, u = self.diffusion_model.load_state_dict(to_load, strict=False) if len(m) > 0: - logging.warning("unet missing: {}".format(m)) + logging.warning("unet missing: %s", m) if len(u) > 0: - logging.warning("unet unexpected: {}".format(u)) + logging.warning("unet unexpected: %s", u) del to_load return self diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 0853b3aec..d26811d41 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -775,7 +775,7 @@ def model_config_from_unet_config(unet_config, state_dict=None): if model_config.matches(unet_config, state_dict): return model_config(unet_config) - logging.error("no match {}".format(unet_config)) + logging.error("no match %s", unet_config) return None def model_config_from_unet(state_dict, unet_key_prefix, use_base_if_no_match=False, metadata=None): diff --git a/comfy/model_management.py b/comfy/model_management.py index 9fc950a61..c2e3e8516 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -98,7 +98,8 @@ if args.directml is not None: directml_device = torch_directml.device() else: directml_device = torch_directml.device(device_index) - logging.info("Using directml with device: {}".format(torch_directml.device_name(device_index))) + logging.info("Using directml with device: %s", torch_directml.device_name(device_index)) + # torch_directml.disable_tiled_resources(True) lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default. 
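A note on the logging idiom this patch converts to, with a small self-contained sketch (illustrative only, not part of the patch; the logger name and the example payload are made up): passing values as extra arguments lets the logging module defer %-interpolation until a handler actually emits the record, whereas str.format() and f-strings always build the full message string, even for disabled debug/info levels.

```python
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("example")  # hypothetical logger, not from the patch

missing = [f"blocks.{i}.weight" for i in range(5000)]  # made-up payload

# Eager: the f-string renders the full list even though DEBUG is disabled here.
log.debug(f"missing keys: {missing}")

# Lazy: the list is attached to the LogRecord and only interpolated if a
# handler actually emits the record, so disabled levels stay cheap.
log.debug("missing keys: %s", missing)

# Each placeholder needs a matching argument; dropping the %s (for example
# log.warning("missing keys", missing)) makes the standard library print
# "--- Logging error ---" at emit time and the value never reaches the log.
log.info("loaded %d keys, %.2f MB", len(missing), 1.25)
```

The trade-off is that placeholder/argument mismatches are only detected when a record is emitted, so the format string and argument list have to be kept in sync by hand.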
@@ -238,13 +239,13 @@ def mac_version(): total_vram = get_total_memory(get_torch_device()) / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024) -logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram)) +logging.info("Total VRAM %0.0f MB, total RAM %0.0f MB", total_vram, total_ram) try: - logging.info("pytorch version: {}".format(torch_version)) + logging.info("pytorch version: %s", torch_version) mac_ver = mac_version() if mac_ver is not None: - logging.info("Mac Version {}".format(mac_ver)) + logging.info("Mac Version %s", mac_ver) except: pass @@ -268,7 +269,7 @@ else: pass try: XFORMERS_VERSION = xformers.version.__version__ - logging.info("xformers version: {}".format(XFORMERS_VERSION)) + logging.info("xformers version: %s", XFORMERS_VERSION) if XFORMERS_VERSION.startswith("0.0.18"): logging.warning("\nWARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.") logging.warning("Please downgrade or upgrade xformers to a different version.\n") @@ -349,8 +350,8 @@ try: except: rocm_version = (6, -1) - logging.info("AMD arch: {}".format(arch)) - logging.info("ROCm version: {}".format(rocm_version)) + logging.info("AMD arch: %s", arch) + logging.info("ROCm version: %s", rocm_version) if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: if importlib.util.find_spec('triton') is not None: # AMD efficient attention implementation depends on triton. TODO: better way of detecting if it's compiled in or not. if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much @@ -444,7 +445,7 @@ def get_torch_device_name(device): return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device)) try: - logging.info("Device: {}".format(get_torch_device_name(get_torch_device()))) + logging.info("Device: %s", get_torch_device_name(get_torch_device())) except: logging.warning("Could not pick default device.") @@ -573,7 +574,7 @@ if WINDOWS: if args.reserve_vram is not None: EXTRA_RESERVED_VRAM = args.reserve_vram * 1024 * 1024 * 1024 - logging.debug("Reserving {}MB vram for other applications.".format(EXTRA_RESERVED_VRAM / (1024 * 1024))) + logging.debug("Reserving %0.2f MB of VRAM as per user request.", EXTRA_RESERVED_VRAM / (1024 * 1024)) def extra_reserved_memory(): return EXTRA_RESERVED_VRAM @@ -678,7 +679,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu free_mem = get_free_memory(device) if free_mem < minimum_memory_required: models_l = free_memory(minimum_memory_required, device) - logging.info("{} models unloaded.".format(len(models_l))) + logging.info("%d models unloaded.", len(models_l)) for loaded_model in models_to_load: model = loaded_model.model @@ -724,7 +725,7 @@ def cleanup_models_gc(): for i in range(len(current_loaded_models)): cur = current_loaded_models[i] if cur.is_dead(): - logging.info("Potential memory leak detected with model {}, doing a full garbage collect, for maximum performance avoid circular references in the model code.".format(cur.real_model().__class__.__name__)) + logging.info("Potential memory leak detected with model %s, doing a full garbage collect, for maximum performance avoid circular references in the model code.", cur.real_model().__class__.__name__) do_gc = True break @@ -735,7 +736,7 @@ def cleanup_models_gc(): for i in range(len(current_loaded_models)): cur = current_loaded_models[i] if cur.is_dead(): - logging.warning("WARNING, memory leak 
with model {}. Please make sure it is not being referenced from somewhere.".format(cur.real_model().__class__.__name__)) + logging.warning("WARNING, memory leak with model %s. Please make sure it is not being referenced from somewhere.", cur.real_model().__class__.__name__) @@ -1027,7 +1028,7 @@ if args.disable_async_offload: NUM_STREAMS = 0 if NUM_STREAMS > 0: - logging.info("Using async weight offloading with {} streams".format(NUM_STREAMS)) + logging.info("Using async weight offloading with %d streams", NUM_STREAMS) def current_stream(device): if device is None: @@ -1122,7 +1123,7 @@ if not args.disable_pinned_memory: MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50% else: MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95 - logging.info("Enabled pinned memory {}".format(MAX_PINNED_MEMORY // (1024 * 1024))) + logging.info("Enabled pinned memory. %0.2f MB max", MAX_PINNED_MEMORY / (1024 * 1024)) PINNING_ALLOWED_TYPES = set(["Parameter", "QuantizedTensor"]) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 2bcea5207..310d5a5bd 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -778,7 +778,7 @@ class ModelPatcher: if comfy.model_management.is_device_cuda(device_to): torch.cuda.synchronize() - logging.debug("lowvram: loaded module regularly {} {}".format(n, m)) + logging.debug("lowvram: loaded module regularly %s to %s", n, m) m.comfy_patched_weights = True for x in load_completely: @@ -791,10 +791,10 @@ class ModelPatcher: self.pin_weight_to_device("{}.{}".format(n, param)) if lowvram_counter > 0: - logging.info("loaded partially; {:.2f} MB usable, {:.2f} MB loaded, {:.2f} MB offloaded, {:.2f} MB buffer reserved, lowvram patches: {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), lowvram_mem_counter / (1024 * 1024), offload_buffer / (1024 * 1024), patch_counter)) + logging.info("loaded partially; %.2f MB usable, %.2f MB loaded, %.2f MB offloaded, %.2f MB buffer reserved, lowvram patches: %d", lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), lowvram_mem_counter / (1024 * 1024), offload_buffer / (1024 * 1024), patch_counter) self.model.model_lowvram = True else: - logging.info("loaded completely; {:.2f} MB usable, {:.2f} MB loaded, full load: {}".format(lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), full_load)) + logging.info("loaded completely; %.2f MB usable, %.2f MB loaded, full load: %s", lowvram_model_memory / (1024 * 1024), mem_counter / (1024 * 1024), full_load) self.model.model_lowvram = False if full_load: self.model.to(device_to) @@ -941,7 +941,7 @@ class ModelPatcher: offload_buffer = max(offload_buffer, potential_offload) offload_weight_factor.append(module_mem) offload_weight_factor.pop(0) - logging.debug("freed {}".format(n)) + logging.debug("freed %s", n) for param in params: self.pin_weight_to_device("{}.{}".format(n, param)) @@ -951,7 +951,7 @@ class ModelPatcher: self.model.lowvram_patch_counter += patch_counter self.model.model_loaded_weight_memory -= memory_freed self.model.model_offload_buffer_memory = offload_buffer - logging.info("Unloaded partially: {:.2f} MB freed, {:.2f} MB remains loaded, {:.2f} MB buffer reserved, lowvram patches: {}".format(memory_freed / (1024 * 1024), self.model.model_loaded_weight_memory / (1024 * 1024), offload_buffer / (1024 * 1024), self.model.lowvram_patch_counter)) + logging.info("Unloaded partially: %.2f MB freed, %.2f MB remains loaded, %.2f MB buffer reserved, lowvram patches: 
%d", memory_freed / (1024 * 1024), self.model.model_loaded_weight_memory / (1024 * 1024), offload_buffer / (1024 * 1024), self.model.lowvram_patch_counter) return memory_freed def partially_load(self, device_to, extra_memory=0, force_patch_weights=False): diff --git a/comfy/ops.py b/comfy/ops.py index cd536e22d..ebef979ab 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -455,7 +455,7 @@ class fp8_ops(manual_cast): if out is not None: return out except Exception as e: - logging.info("Exception during fp8 op: {}".format(e)) + logging.info("Exception during fp8 op: %s", str(e)) weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True) x = torch.nn.functional.linear(input, weight, bias) diff --git a/comfy/sd.py b/comfy/sd.py index 32157e18b..adc6310ef 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -95,7 +95,7 @@ def load_lora_for_models(model, clip, lora, strength_model, strength_clip): k1 = set(k1) for x in loaded: if (x not in k) and (x not in k1): - logging.warning("NOT LOADED {}".format(x)) + logging.warning("NOT LOADED %s", x) return (new_modelpatcher, new_clip) @@ -139,27 +139,27 @@ class CLIP: for c in state_dict: m, u = self.load_sd(c) if len(m) > 0: - logging.warning("clip missing: {}".format(m)) + logging.warning("clip missing: %s", m) if len(u) > 0: - logging.debug("clip unexpected: {}".format(u)) + logging.debug("clip unexpected: %s", u) else: m, u = self.load_sd(state_dict, full_model=True) if len(m) > 0: m_filter = list(filter(lambda a: ".logit_scale" not in a and ".transformer.text_projection.weight" not in a, m)) if len(m_filter) > 0: - logging.warning("clip missing: {}".format(m)) + logging.warning("clip missing: %s", m) else: - logging.debug("clip missing: {}".format(m)) + logging.debug("clip missing: %s", m) if len(u) > 0: - logging.debug("clip unexpected {}:".format(u)) + logging.debug("clip unexpected %s:", u) if params['device'] == load_device: model_management.load_models_gpu([self.patcher], force_full_load=True) self.layer_idx = None self.use_clip_schedule = False - logging.info("CLIP/text encoder model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype)) + logging.info("CLIP/text encoder model load device: %s, offload device: %s, current: %s, dtype: %s", load_device, offload_device, params['device'], dtype) self.tokenizer_options = {} def clone(self): @@ -664,10 +664,10 @@ class VAE: m, u = self.first_stage_model.load_state_dict(sd, strict=False) if len(m) > 0: - logging.warning("Missing VAE keys {}".format(m)) + logging.warning("Missing VAE keys %s", str(m)) if len(u) > 0: - logging.debug("Leftover VAE keys {}".format(u)) + logging.debug("Leftover VAE keys %s", str(u)) if device is None: device = model_management.vae_device() @@ -680,7 +680,7 @@ class VAE: self.output_device = model_management.intermediate_device() self.patcher = comfy.model_patcher.ModelPatcher(self.first_stage_model, load_device=self.device, offload_device=offload_device) - logging.info("VAE load device: {}, offload device: {}, dtype: {}".format(self.device, offload_device, self.vae_dtype)) + logging.info("VAE load device: %s, offload device: %s, dtype: %s", self.device, offload_device, self.vae_dtype) self.model_size() def model_size(self): @@ -1440,7 +1440,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c left_over = sd.keys() if len(left_over) > 0: - logging.debug("left over keys: {}".format(left_over)) + logging.debug("left over keys: %s", left_over) if output_model: 
model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device()) @@ -1510,7 +1510,7 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None): if k in sd: new_sd[diffusers_keys[k]] = sd.pop(k) else: - logging.warning("{} {}".format(diffusers_keys[k], k)) + logging.warning("%s %s", diffusers_keys[k], k) offload_device = model_management.unet_offload_device() unet_weight_dtype = list(model_config.supported_inference_dtypes) @@ -1539,7 +1539,7 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None): model.load_model_weights(new_sd, "") left_over = sd.keys() if len(left_over) > 0: - logging.info("left over keys in diffusion model: {}".format(left_over)) + logging.info("left over keys in diffusion model: %s", left_over) return comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=offload_device) @@ -1547,7 +1547,7 @@ def load_diffusion_model(unet_path, model_options={}): sd, metadata = comfy.utils.load_torch_file(unet_path, return_metadata=True) model = load_diffusion_model_state_dict(sd, model_options=model_options, metadata=metadata) if model is None: - logging.error("ERROR UNSUPPORTED DIFFUSION MODEL {}".format(unet_path)) + logging.error("ERROR UNSUPPORTED DIFFUSION MODEL %s", unet_path) raise RuntimeError("ERROR: Could not detect model type of: {}\n{}".format(unet_path, model_detection_error_hint(unet_path, sd))) return model diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index faaa0a59b..72da31646 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -235,7 +235,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): else: index += -1 pad_extra += emb_shape - logging.warning("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored {} != {}".format(emb.shape[-1], tokens_embed.shape[-1])) + logging.warning("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored %s != %s", emb.shape[-1], tokens_embed.shape[-1]) if pad_extra > 0: padd_embed = self.transformer.get_input_embeddings()(torch.tensor([[self.special_tokens["pad"]] * pad_extra], device=device, dtype=torch.long), out_dtype=torch.float32) @@ -438,7 +438,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No except: embed_out = safe_load_embed_zip(embed_path) except Exception: - logging.warning("{}\n\nerror loading embedding, skipping loading: {}".format(traceback.format_exc(), embedding_name)) + logging.warning("%s\n\nerror loading embedding, skipping loading: %s", traceback.format_exc(), embedding_name) return None if embed_out is None: diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py index 0e7a829ba..8c8b76620 100644 --- a/comfy/supported_models_base.py +++ b/comfy/supported_models_base.py @@ -120,5 +120,5 @@ class BASE: self.manual_cast_dtype = manual_cast_dtype def __getattr__(self, name): - logging.warning("\nWARNING, you accessed {} from the model config object which doesn't exist. Please fix your code.\n".format(name)) + logging.warning("\nWARNING, you accessed %s from the model config object which doesn't exist. 
Please fix your code.\n", name) return None diff --git a/comfy/text_encoders/ace.py b/comfy/text_encoders/ace.py index d650bb10d..0a2df7eac 100644 --- a/comfy/text_encoders/ace.py +++ b/comfy/text_encoders/ace.py @@ -80,7 +80,7 @@ class VoiceBpeTokenizer: token_idx = self.encode(line, lang) lyric_token_idx = lyric_token_idx + token_idx + [2] except Exception as e: - logging.warning("tokenize error {} for line {} major_language {}".format(e, line, lang)) + logging.warning("tokenize error %s for line %s major_language %s", e, line, lang) return {"input_ids": lyric_token_idx} @staticmethod diff --git a/comfy/text_encoders/hidream.py b/comfy/text_encoders/hidream.py index 600b34480..afcf39d6c 100644 --- a/comfy/text_encoders/hidream.py +++ b/comfy/text_encoders/hidream.py @@ -62,7 +62,7 @@ class HiDreamTEModel(torch.nn.Module): else: self.llama = None - logging.debug("Created HiDream text encoder with: clip_l {}, clip_g {}, t5xxl {}:{}, llama {}:{}".format(clip_l, clip_g, t5, dtype_t5, llama, dtype_llama)) + logging.debug("Created HiDream text encoder with: clip_l %s, clip_g %s, t5xxl %s:%s, llama %s:%s", clip_l, clip_g, t5, dtype_t5, llama, dtype_llama) def set_clip_options(self, options): if self.clip_l is not None: diff --git a/comfy/text_encoders/sd3_clip.py b/comfy/text_encoders/sd3_clip.py index 8b153c72b..10a238a02 100644 --- a/comfy/text_encoders/sd3_clip.py +++ b/comfy/text_encoders/sd3_clip.py @@ -81,7 +81,7 @@ class SD3ClipModel(torch.nn.Module): else: self.t5xxl = None - logging.debug("Created SD3 text encoder with: clip_l {}, clip_g {}, t5xxl {}:{}".format(clip_l, clip_g, t5, dtype_t5)) + logging.debug("Created SD3 text encoder with: clip_l %s, clip_g %s, t5xxl %s:%s", clip_l, clip_g, t5, dtype_t5) def set_clip_options(self, options): if self.clip_l is not None: diff --git a/comfy/utils.py b/comfy/utils.py index ffa98c9b1..6da70dc13 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -86,7 +86,7 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False): if safe_load or ALWAYS_SAFE_LOAD: pl_sd = torch.load(ckpt, map_location=device, weights_only=True, **torch_args) else: - logging.warning("WARNING: loading {} unsafely, upgrade your pytorch to 2.4 or newer to load this file safely.".format(ckpt)) + logging.warning("WARNING: loading %s unsafely, upgrade your pytorch to 2.4 or newer to load this file safely.", ckpt) pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle) if "state_dict" in pl_sd: sd = pl_sd["state_dict"] diff --git a/comfy/weight_adapter/boft.py b/comfy/weight_adapter/boft.py index b2a2f1bd4..20581924c 100644 --- a/comfy/weight_adapter/boft.py +++ b/comfy/weight_adapter/boft.py @@ -111,5 +111,5 @@ class BOFTAdapter(WeightAdapterBase): else: weight += function((strength * lora_diff).type(weight.dtype)) except Exception as e: - logging.error("ERROR {} {} {}".format(self.name, key, e)) + logging.error("ERROR %s %s %s", self.name, key, e) return weight diff --git a/comfy/weight_adapter/glora.py b/comfy/weight_adapter/glora.py index 939abbba5..c5f77a562 100644 --- a/comfy/weight_adapter/glora.py +++ b/comfy/weight_adapter/glora.py @@ -89,5 +89,5 @@ class GLoRAAdapter(WeightAdapterBase): else: weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) except Exception as e: - logging.error("ERROR {} {} {}".format(self.name, key, e)) + logging.error("ERROR %s %s %s", self.name, key, e) return weight diff --git a/comfy/weight_adapter/loha.py b/comfy/weight_adapter/loha.py index 0abb2d403..4c771c521 100644 --- 
a/comfy/weight_adapter/loha.py +++ b/comfy/weight_adapter/loha.py @@ -228,5 +228,5 @@ class LoHaAdapter(WeightAdapterBase): else: weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) except Exception as e: - logging.error("ERROR {} {} {}".format(self.name, key, e)) + logging.error("ERROR %s %s %s", self.name, key, e) return weight diff --git a/comfy/weight_adapter/lokr.py b/comfy/weight_adapter/lokr.py index 9b2aff2d7..b0bbc647c 100644 --- a/comfy/weight_adapter/lokr.py +++ b/comfy/weight_adapter/lokr.py @@ -216,5 +216,5 @@ class LoKrAdapter(WeightAdapterBase): else: weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) except Exception as e: - logging.error("ERROR {} {} {}".format(self.name, key, e)) + logging.error("ERROR %s %s %s", self.name, key, e) return weight diff --git a/comfy/weight_adapter/lora.py b/comfy/weight_adapter/lora.py index 3cc60bb1b..98af54331 100644 --- a/comfy/weight_adapter/lora.py +++ b/comfy/weight_adapter/lora.py @@ -208,5 +208,5 @@ class LoRAAdapter(WeightAdapterBase): else: weight += function(((strength * alpha) * lora_diff).type(weight.dtype)) except Exception as e: - logging.error("ERROR {} {} {}".format(self.name, key, e)) + logging.error("ERROR %s %s %s", self.name, key, e) return weight diff --git a/comfy/weight_adapter/oft.py b/comfy/weight_adapter/oft.py index c0aab9635..cd1e4af39 100644 --- a/comfy/weight_adapter/oft.py +++ b/comfy/weight_adapter/oft.py @@ -157,5 +157,5 @@ class OFTAdapter(WeightAdapterBase): else: weight += function((strength * lora_diff).type(weight.dtype)) except Exception as e: - logging.error("ERROR {} {} {}".format(self.name, key, e)) + logging.error("ERROR %s %s %s", self.name, key, e) return weight diff --git a/comfy_api/generate_api_stubs.py b/comfy_api/generate_api_stubs.py index 7520664b2..bb9239ff7 100644 --- a/comfy_api/generate_api_stubs.py +++ b/comfy_api/generate_api_stubs.py @@ -34,7 +34,7 @@ def generate_stubs_for_module(module_name: str) -> None: logging.info("Generated stub file for %s", module_name) else: logging.warning( - f"Module {module_name} has ComfyAPISync but no ComfyAPI" + "Module %s has ComfyAPISync but no ComfyAPI", module_name ) elif hasattr(module, "ComfyAPI"): @@ -49,7 +49,7 @@ def generate_stubs_for_module(module_name: str) -> None: logging.info("Generated stub file for %s", module_name) else: logging.warning( - f"Module {module_name} does not export ComfyAPI or ComfyAPISync" + "Module %s does not export ComfyAPI or ComfyAPISync", module_name ) except Exception as e: diff --git a/comfy_api/internal/async_to_sync.py b/comfy_api/internal/async_to_sync.py index 3b44f5469..e78c895af 100644 --- a/comfy_api/internal/async_to_sync.py +++ b/comfy_api/internal/async_to_sync.py @@ -282,7 +282,7 @@ class AsyncToSyncConverter: setattr(self._async_instance, attr_name, async_instance) except Exception as e: logging.warning( - f"Failed to create instance for {attr_name}: {e}" + "Failed to create instance for %s: %s", attr_name, e ) # Handle other instance attributes that might not be annotated @@ -981,7 +981,7 @@ class AsyncToSyncConverter: except Exception as e: # If stub generation fails, log the error but don't break the main functionality logging.error( - f"Error generating stub file for {sync_class.__name__}: {str(e)}" + "Error generating stub file for %s: %s", sync_class.__name__, str(e) ) import traceback diff --git a/comfy_extras/nodes_dataset.py b/comfy_extras/nodes_dataset.py index 874db3503..c1528343c 100644 --- a/comfy_extras/nodes_dataset.py +++ 
b/comfy_extras/nodes_dataset.py @@ -1002,7 +1002,7 @@ class ImageDeduplicationNode(ImageProcessingNode): if similarity >= similarity_threshold: is_duplicate = True logging.info( - f"Image {i} is similar to image {j} (similarity: {similarity:.3f}), skipping" + "Image %d is similar to image %d (similarity: %.3f), skipping", i, j, similarity ) break @@ -1012,7 +1012,7 @@ class ImageDeduplicationNode(ImageProcessingNode): # Return only unique images unique_images = [images[i] for i in keep_indices] logging.info( - f"Deduplication: kept {len(unique_images)} out of {len(images)} images" + "Deduplication: kept %d out of %d images", len(unique_images), len(images) ) return unique_images @@ -1086,7 +1086,7 @@ class ImageGridNode(ImageProcessingNode): grid.paste(img, (x, y)) logging.info( - f"Created {columns}x{rows} grid with {num_images} images ({grid_width}x{grid_height})" + "Created %d x %d grid with %d images (%d x %d)", columns, rows, num_images, grid_width, grid_height ) return pil_to_tensor(grid) @@ -1214,7 +1214,7 @@ class ResolutionBucket(io.ComfyNode): output_conditions.append(bucket_data["conditions"]) logging.info( - f"Resolution bucket ({h}x{w}): {len(bucket_data['latents'])} samples" + "Resolution bucket (%d x %d): %d samples", h, w, len(bucket_data["latents"]) ) logging.info("Created %s resolution buckets from %s samples", len(buckets), len(flat_latents)) @@ -1302,7 +1302,7 @@ class MakeTrainingDataset(io.ComfyNode): conditioning_list.append(cond) logging.info( - f"Created dataset with {len(latents_list)} latents and {len(conditioning_list)} conditioning." + "Created dataset with %d latents and %d conditioning.", len(latents_list), len(conditioning_list) ) return io.NodeOutput(latents_list, conditioning_list) @@ -1369,7 +1369,7 @@ class SaveTrainingDataset(io.ComfyNode): num_shards = (num_samples + shard_size - 1) // shard_size # Ceiling division logging.info( - f"Saving {num_samples} samples to {num_shards} shards in {output_dir}..." + "Saving %d samples to %d shards in %s...", num_samples, num_shards, output_dir ) # Save data in shards @@ -1391,7 +1391,7 @@ class SaveTrainingDataset(io.ComfyNode): torch.save(shard_data, f) logging.info( - f"Saved shard {shard_idx + 1}/{num_shards}: {shard_filename} ({end_idx - start_idx} samples)" + "Saved shard %d/%d: %s (%d samples)", shard_idx + 1, num_shards, shard_filename, end_idx - start_idx ) # Save metadata @@ -1477,7 +1477,7 @@ class LoadTrainingDataset(io.ComfyNode): logging.info("Loaded %s: %s samples", shard_file, len(shard_data['latents'])) logging.info( - f"Successfully loaded {len(all_latents)} samples from {dataset_dir}." 
+ "Successfully loaded %d samples from %s.", len(all_latents), dataset_dir ) return io.NodeOutput(all_latents, all_conditioning) diff --git a/comfy_extras/nodes_easycache.py b/comfy_extras/nodes_easycache.py index 220684a7f..8c6e665d6 100644 --- a/comfy_extras/nodes_easycache.py +++ b/comfy_extras/nodes_easycache.py @@ -300,7 +300,7 @@ class EasyCacheHolder: return True if metadata == self.state_metadata: return True - logging.warn(f"{self.name} - Tensor shape, dtype or device changed, resetting state") + logging.warning("%s - Tensor shape, dtype or device changed, resetting state", self.name) self.reset() return False @@ -435,7 +435,7 @@ class LazyCacheHolder: return True if metadata == self.state_metadata: return True - logging.warn(f"{self.name} - Tensor shape, dtype or device changed, resetting state") + logging.warning("%s - Tensor shape, dtype or device changed, resetting state", self.name) self.reset() return False diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py index 3429b731e..a2c43019e 100644 --- a/comfy_extras/nodes_freelunch.py +++ b/comfy_extras/nodes_freelunch.py @@ -56,7 +56,7 @@ class FreeU(IO.ComfyNode): try: hsp = Fourier_filter(hsp, threshold=1, scale=scale[1]) except: - logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device)) + logging.warning("Device %s does not support the torch.fft functions used in the FreeU node, switching to CPU.", hsp.device) on_cpu_devices[hsp.device] = True hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) else: @@ -110,7 +110,7 @@ class FreeU_V2(IO.ComfyNode): try: hsp = Fourier_filter(hsp, threshold=1, scale=scale[1]) except: - logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device)) + logging.warning("Device %s does not support the torch.fft functions used in the FreeU node, switching to CPU.", hsp.device) on_cpu_devices[hsp.device] = True hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) else: diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py index 2a6a87a81..890a8dde0 100644 --- a/comfy_extras/nodes_hypernetwork.py +++ b/comfy_extras/nodes_hypernetwork.py @@ -27,7 +27,7 @@ def load_hypernetwork_patch(path, strength): } if activation_func not in valid_activation: - logging.error("Unsupported Hypernetwork format, if you report it I might implement it. {} {} {} {} {} {}".format(path, activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout)) + logging.error("Unsupported Hypernetwork format, if you report it I might implement it. 
%s %s %s %s %s %s", path, activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout) return None out = {} diff --git a/comfy_extras/nodes_lora_extract.py b/comfy_extras/nodes_lora_extract.py index a2375cba7..bd66049d6 100644 --- a/comfy_extras/nodes_lora_extract.py +++ b/comfy_extras/nodes_lora_extract.py @@ -65,7 +65,7 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu() output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu() except: - logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k)) + logging.warning("Could not generate lora weights for key %s, is the weight difference a zero?", k) elif lora_type == LORAType.FULL_DIFF: output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu() diff --git a/execution.py b/execution.py index da05b4a18..438fe5157 100644 --- a/execution.py +++ b/execution.py @@ -83,7 +83,7 @@ class IsChangedCache: is_changed = await resolve_map_node_over_list_results(is_changed) node["is_changed"] = [None if isinstance(x, ExecutionBlocker) else x for x in is_changed] except Exception as e: - logging.warning("WARNING: {}".format(e)) + logging.warning("WARNING: %s", e) node["is_changed"] = float("NaN") finally: self.is_changed[node_id] = node["is_changed"] @@ -601,13 +601,13 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed, if isinstance(ex, comfy.model_management.OOM_EXCEPTION): tips = "This error means you ran out of memory on your GPU.\n\nTIPS: If the workflow worked before you might have accidentally set the batch_size to a large number." - logging.info("Memory summary: {}".format(comfy.model_management.debug_memory_summary())) + logging.info("Memory summary: %s", comfy.model_management.debug_memory_summary()) logging.error("Got an OOM, unloading all loaded models.") comfy.model_management.unload_all_models() error_details = { "node_id": real_node_id, - "exception_message": "{}\n{}".format(ex, tips), + "exception_message": "%s\n%s" % (ex, tips), "exception_type": exception_type, "traceback": traceback.format_tb(tb), "current_inputs": input_data_formatted diff --git a/folder_paths.py b/folder_paths.py index 3abd4ce56..0d84ba225 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -316,7 +316,7 @@ def recursive_search(directory: str, excluded_dir_names: list[str] | None=None) except FileNotFoundError: logging.warning("Warning: Unable to access %s. Skipping this path.", directory) - logging.debug("recursive file list on directory {}".format(directory)) + logging.debug("recursive file list on directory %s", directory) dirpath: str subdirs: list[str] filenames: list[str] @@ -338,7 +338,7 @@ def recursive_search(directory: str, excluded_dir_names: list[str] | None=None) except FileNotFoundError: logging.warning("Warning: Unable to access %s. 
Skipping this path.", path) continue - logging.debug("found {} files".format(len(result))) + logging.debug("found %d files", len(result)) return result, dirs def filter_files_extensions(files: Collection[str], extensions: Collection[str]) -> list[str]: @@ -361,7 +361,7 @@ def get_full_path(folder_name: str, filename: str) -> str | None: if os.path.isfile(full_path): return full_path elif os.path.islink(full_path): - logging.warning("WARNING path {} exists but doesn't link anywhere, skipping.".format(full_path)) + logging.warning("WARNING path %s exists but doesn't link anywhere, skipping.", full_path) return None diff --git a/latent_preview.py b/latent_preview.py index d52e3f7a1..c3e72ba8c 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -102,7 +102,7 @@ def get_previewer(device, latent_format): taesd = TAESD(None, taesd_decoder_path, latent_channels=latent_format.latent_channels).to(device) previewer = TAESDPreviewerImpl(taesd) else: - logging.warning("Warning: TAESD previews enabled, but could not find models/vae_approx/{}".format(latent_format.taesd_decoder_name)) + logging.warning("Warning: TAESD previews enabled, but could not find models/vae_approx/%s", latent_format.taesd_decoder_name) if previewer is None: if latent_format.latent_rgb_factors is not None: diff --git a/main.py b/main.py index 4643dba5d..b449fd496 100644 --- a/main.py +++ b/main.py @@ -41,12 +41,11 @@ if __name__ == "__main__": os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device) os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device) os.environ["ASCEND_RT_VISIBLE_DEVICES"] = str(args.cuda_device) - logging.info("Set cuda device to: {}".format(args.cuda_device)) + logging.info("Set cuda device to: %s", args.cuda_device) if args.oneapi_device_selector is not None: os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector - logging.info("Set oneapi device selector to: {}".format(args.oneapi_device_selector)) - + logging.info("Set oneapi device selector to: %s", args.oneapi_device_selector) if args.deterministic: if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ: os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8" @@ -157,7 +156,7 @@ def execute_prestartup_script(): import_message = "" else: import_message = " (PRESTARTUP FAILED)" - logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1])) + logging.info("%6.1f seconds%s: %s", n[0], import_message, n[1]) logging.info("") apply_custom_paths() @@ -253,7 +252,7 @@ def prompt_worker(q, server_instance): execution_time = time.strftime("%H:%M:%S", time.gmtime(execution_time)) logging.info("Prompt executed in %s", execution_time) else: - logging.info("Prompt executed in {:.2f} seconds".format(execution_time)) + logging.info("Prompt executed in %.2f seconds", execution_time) flags = q.get_flags() free_memory = flags.get("free_memory", False) @@ -399,8 +398,8 @@ def start_comfyui(asyncio_loop=None): if __name__ == "__main__": # Running directly, just start ComfyUI. - logging.info("Python version: {}".format(sys.version)) - logging.info("ComfyUI version: {}".format(comfyui_version.__version__)) + logging.info("Python version: %s", sys.version) + logging.info("ComfyUI version: %s", comfyui_version.__version__) if sys.version_info.major == 3 and sys.version_info.minor < 10: logging.warning("WARNING: You are using a python version older than 3.10, please upgrade to a newer one. 
3.12 and above is recommended.") diff --git a/nodes.py b/nodes.py index 31e3b8b61..1720ef19f 100644 --- a/nodes.py +++ b/nodes.py @@ -2142,7 +2142,7 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom sys_module_name = module_path.replace(".", "_x_") try: - logging.debug("Trying to load custom node {}".format(module_path)) + logging.debug("Trying to load custom node %s", module_path) if os.path.isfile(module_path): module_spec = importlib.util.spec_from_file_location(sys_module_name, module_path) module_dir = os.path.split(module_path)[0] @@ -2171,7 +2171,7 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom EXTENSION_WEB_DIRS[project_name] = web_dir_path - logging.info("Automatically register web folder {} for {}".format(web_dir_name, project_name)) + logging.info("Automatically register web folder %s for %s", web_dir_name, project_name) except Exception as e: logging.warning("Unable to parse pyproject.toml due to lack dependency pydantic-settings, please run 'pip install -r requirements.txt': %s", e) @@ -2271,7 +2271,7 @@ async def init_external_custom_nodes(): import_message = "" else: import_message = " (IMPORT FAILED)" - logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1])) + logging.info("%6.1f seconds%s: %s", n[0], import_message, n[1]) logging.info("") async def init_builtin_extra_nodes(): @@ -2440,7 +2440,7 @@ async def init_extra_nodes(init_custom_nodes=True, init_api_nodes=True): if len(import_failed_api) > 0: logging.warning("WARNING: some comfy_api_nodes/ nodes did not import correctly. This may be because they are missing some dependencies.\n") for node in import_failed_api: - logging.warning("IMPORT FAILED: {}".format(node)) + logging.warning("IMPORT FAILED: %s", node) logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.") if args.windows_standalone_build: logging.warning("Please run the update script: update/update_comfyui.bat") @@ -2451,7 +2451,7 @@ if len(import_failed) > 0: logging.warning("WARNING: some comfy_extras/ nodes did not import correctly. 
This may be because they are missing some dependencies.\n") for node in import_failed: - logging.warning("IMPORT FAILED: {}".format(node)) + logging.warning("IMPORT FAILED: %s", node) logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.") if args.windows_standalone_build: logging.warning("Please run the update script: update/update_comfyui.bat") diff --git a/server.py b/server.py index d7458d2c7..7b47f238e 100644 --- a/server.py +++ b/server.py @@ -57,12 +57,20 @@ def _remove_sensitive_from_queue(queue: list) -> list: async def send_socket_catch_exception(function, message): try: await function(message) - except (aiohttp.ClientError, aiohttp.ClientPayloadError, ConnectionResetError, BrokenPipeError, ConnectionError) as err: - logging.warning("send error: {}".format(err)) + except ( + aiohttp.ClientError, + aiohttp.ClientPayloadError, + ConnectionResetError, + BrokenPipeError, + ConnectionError, + ) as err: + logging.warning("send error: %s", str(err)) + # Track deprecated paths that have been warned about to only warn once per file _deprecated_paths_warned = set() + @web.middleware async def deprecation_warning(request: web.Request, handler): """Middleware to warn about deprecated frontend API paths""" @@ -73,9 +81,10 @@ async def deprecation_warning(request: web.Request, handler): if path not in _deprecated_paths_warned: _deprecated_paths_warned.add(path) logging.warning( - f"[DEPRECATION WARNING] Detected import of deprecated legacy API: {path}. " - f"This is likely caused by a custom node extension using outdated APIs. " - f"Please update your extensions or contact the extension author for an updated version." + "[DEPRECATION WARNING] Detected import of deprecated legacy API: %s. " + "This is likely caused by a custom node extension using outdated APIs. " + "Please update your extensions or contact the extension author for an updated version.", + path, ) response: web.Response = await handler(request) @@ -104,14 +115,17 @@ def create_cors_middleware(allowed_origin: str): else: response = await handler(request) - response.headers['Access-Control-Allow-Origin'] = allowed_origin - response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS, PATCH' - response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization' - response.headers['Access-Control-Allow-Credentials'] = 'true' + response.headers["Access-Control-Allow-Origin"] = allowed_origin + response.headers["Access-Control-Allow-Methods"] = ( + "POST, GET, DELETE, PUT, OPTIONS, PATCH" + ) + response.headers["Access-Control-Allow-Headers"] = "Content-Type, Authorization" + response.headers["Access-Control-Allow-Credentials"] = "true" return response return cors_middleware + def is_loopback(host): if host is None: return False @@ -141,28 +155,40 @@ def create_origin_only_middleware(): @web.middleware async def origin_only_middleware(request: web.Request, handler): - #this code is used to prevent the case where a random website can queue comfy workflows by making a POST to 127.0.0.1 which browsers don't prevent for some dumb reason. 
- #in that case the Host and Origin hostnames won't match - #I know the proper fix would be to add a cookie but this should take care of the problem in the meantime - if 'Host' in request.headers and 'Origin' in request.headers: - host = request.headers['Host'] - origin = request.headers['Origin'] + # this code is used to prevent the case where a random website can queue comfy workflows by making a POST to 127.0.0.1 which browsers don't prevent for some dumb reason. + # in that case the Host and Origin hostnames won't match + # I know the proper fix would be to add a cookie but this should take care of the problem in the meantime + if "Host" in request.headers and "Origin" in request.headers: + host = request.headers["Host"] + origin = request.headers["Origin"] host_domain = host.lower() parsed = urllib.parse.urlparse(origin) origin_domain = parsed.netloc.lower() - host_domain_parsed = urllib.parse.urlsplit('//' + host_domain) + host_domain_parsed = urllib.parse.urlsplit("//" + host_domain) - #limit the check to when the host domain is localhost, this makes it slightly less safe but should still prevent the exploit + # limit the check to when the host domain is localhost, this makes it slightly less safe but should still prevent the exploit loopback = is_loopback(host_domain_parsed.hostname) - if parsed.port is None: #if origin doesn't have a port strip it from the host to handle weird browsers, same for host + if ( + parsed.port is None + ): # if origin doesn't have a port strip it from the host to handle weird browsers, same for host host_domain = host_domain_parsed.hostname if host_domain_parsed.port is None: origin_domain = parsed.hostname - if loopback and host_domain is not None and origin_domain is not None and len(host_domain) > 0 and len(origin_domain) > 0: + if ( + loopback + and host_domain is not None + and origin_domain is not None + and len(host_domain) > 0 + and len(origin_domain) > 0 + ): if host_domain != origin_domain: - logging.warning("WARNING: request with non matching host and origin {} != {}, returning 403".format(host_domain, origin_domain)) + logging.warning( + "request with non matching host and origin %s != %s, returning 403", + host_domain, + origin_domain, + ) return web.Response(status=403) if request.method == "OPTIONS": @@ -184,19 +210,21 @@ def create_block_external_middleware(): else: response = await handler(request) - response.headers['Content-Security-Policy'] = "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' blob:; style-src 'self' 'unsafe-inline'; img-src 'self' data: blob:; font-src 'self'; connect-src 'self'; frame-src 'self'; object-src 'self';" + response.headers["Content-Security-Policy"] = ( + "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' blob:; style-src 'self' 'unsafe-inline'; img-src 'self' data: blob:; font-src 'self'; connect-src 'self'; frame-src 'self'; object-src 'self';" + ) return response return block_external_middleware -class PromptServer(): +class PromptServer: def __init__(self, loop): PromptServer.instance = self mimetypes.init() - mimetypes.add_type('application/javascript; charset=utf-8', '.js') - mimetypes.add_type('image/webp', '.webp') + mimetypes.add_type("application/javascript; charset=utf-8", ".js") + mimetypes.add_type("image/webp", ".webp") self.user_manager = UserManager() self.model_file_manager = ModelFileManager() @@ -207,7 +235,7 @@ class PromptServer(): self.prompt_queue = execution.PromptQueue(self) self.loop = loop self.messages = asyncio.Queue() - 
self.client_session:Optional[aiohttp.ClientSession] = None + self.client_session: Optional[aiohttp.ClientSession] = None self.number = 0 middlewares = [cache_control, deprecation_warning] @@ -226,7 +254,9 @@ class PromptServer(): middlewares.append(comfyui_manager.create_middleware()) max_upload_size = round(args.max_upload_size * 1024 * 1024) - self.app = web.Application(client_max_size=max_upload_size, middlewares=middlewares) + self.app = web.Application( + client_max_size=max_upload_size, middlewares=middlewares + ) self.sockets = dict() self.sockets_metadata = dict() self.web_root = ( @@ -242,11 +272,11 @@ class PromptServer(): self.on_prompt_handlers = [] - @routes.get('/ws') + @routes.get("/ws") async def websocket_handler(request): ws = web.WebSocketResponse() await ws.prepare(request) - sid = request.rel_url.query.get('clientId', '') + sid = request.rel_url.query.get("clientId", "") if sid: # Reusing existing session, remove old self.sockets.pop(sid, None) @@ -260,17 +290,21 @@ class PromptServer(): try: # Send initial state to the new client - await self.send("status", {"status": self.get_queue_info(), "sid": sid}, sid) + await self.send( + "status", {"status": self.get_queue_info(), "sid": sid}, sid + ) # On reconnect if we are the currently executing client send the current node if self.client_id == sid and self.last_node_id is not None: - await self.send("executing", { "node": self.last_node_id }, sid) + await self.send("executing", {"node": self.last_node_id}, sid) # Flag to track if we've received the first message first_message = True async for msg in ws: if msg.type == aiohttp.WSMsgType.ERROR: - logging.warning('ws connection closed with exception %s' % ws.exception()) + logging.warning( + "ws connection closed with exception %s", ws.exception() + ) elif msg.type == aiohttp.WSMsgType.TEXT: try: data = json.loads(msg.data) @@ -278,7 +312,9 @@ class PromptServer(): if first_message and data.get("type") == "feature_flags": # Store client feature flags client_flags = data.get("data", {}) - self.sockets_metadata[sid]["feature_flags"] = client_flags + self.sockets_metadata[sid]["feature_flags"] = ( + client_flags + ) # Send server feature flags in response await self.send( @@ -288,12 +324,16 @@ class PromptServer(): ) logging.debug( - f"Feature flags negotiated for client {sid}: {client_flags}" + "Feature flags negotiated for client %s: %s", + sid, + client_flags, ) first_message = False except json.JSONDecodeError: logging.warning( - f"Invalid JSON received from client {sid}: {msg.data}" + "Invalid JSON received from client %s: %s", + sid, + msg.data, ) except Exception as e: logging.error("Error processing WebSocket message: %s", e) @@ -305,7 +345,7 @@ class PromptServer(): @routes.get("/") async def get_root(request): response = web.FileResponse(os.path.join(self.web_root, "index.html")) - response.headers['Cache-Control'] = 'no-cache' + response.headers["Cache-Control"] = "no-cache" response.headers["Pragma"] = "no-cache" response.headers["Expires"] = "0" return response @@ -313,7 +353,9 @@ class PromptServer(): @routes.get("/embeddings") def get_embeddings(request): embeddings = folder_paths.get_filename_list("embeddings") - return web.json_response(list(map(lambda a: os.path.splitext(a)[0], embeddings))) + return web.json_response( + list(map(lambda a: os.path.splitext(a)[0], embeddings)) + ) @routes.get("/models") def list_model_types(request): @@ -331,15 +373,34 @@ class PromptServer(): @routes.get("/extensions") async def get_extensions(request): - files = 
glob.glob(os.path.join( - glob.escape(self.web_root), 'extensions/**/*.js'), recursive=True) + files = glob.glob( + os.path.join(glob.escape(self.web_root), "extensions/**/*.js"), + recursive=True, + ) - extensions = list(map(lambda f: "/" + os.path.relpath(f, self.web_root).replace("\\", "/"), files)) + extensions = list( + map( + lambda f: "/" + + os.path.relpath(f, self.web_root).replace("\\", "/"), + files, + ) + ) for name, dir in nodes.EXTENSION_WEB_DIRS.items(): - files = glob.glob(os.path.join(glob.escape(dir), '**/*.js'), recursive=True) - extensions.extend(list(map(lambda f: "/extensions/" + urllib.parse.quote( - name) + "/" + os.path.relpath(f, dir).replace("\\", "/"), files))) + files = glob.glob( + os.path.join(glob.escape(dir), "**/*.js"), recursive=True + ) + extensions.extend( + list( + map( + lambda f: "/extensions/" + + urllib.parse.quote(name) + + "/" + + os.path.relpath(f, dir).replace("\\", "/"), + files, + ) + ) + ) return web.json_response(extensions) @@ -384,7 +445,9 @@ class PromptServer(): return web.Response(status=400) subfolder = post.get("subfolder", "") - full_output_folder = os.path.join(upload_dir, os.path.normpath(subfolder)) + full_output_folder = os.path.join( + upload_dir, os.path.normpath(subfolder) + ) filepath = os.path.abspath(os.path.join(full_output_folder, filename)) if os.path.commonpath((upload_dir, filepath)) != upload_dir: @@ -400,7 +463,9 @@ class PromptServer(): else: i = 1 while os.path.exists(filepath): - if compare_image_hash(filepath, image): #compare hash to prevent saving of duplicates with same name, fix for #3465 + if compare_image_hash( + filepath, image + ): # compare hash to prevent saving of duplicates with same name, fix for #3465 image_is_duplicate = True break filename = f"{split[0]} ({i}){split[1]}" @@ -414,7 +479,13 @@ class PromptServer(): with open(filepath, "wb") as f: f.write(image.file.read()) - return web.json_response({"name" : filename, "subfolder": subfolder, "type": image_upload_type}) + return web.json_response( + { + "name": filename, + "subfolder": subfolder, + "type": image_upload_type, + } + ) else: return web.Response(status=400) @@ -423,20 +494,21 @@ class PromptServer(): post = await request.post() return image_upload(post) - @routes.post("/upload/mask") async def upload_mask(request): post = await request.post() def image_save_function(image, post, filepath): original_ref = json.loads(post.get("original_ref")) - filename, output_dir = folder_paths.annotated_filepath(original_ref['filename']) + filename, output_dir = folder_paths.annotated_filepath( + original_ref["filename"] + ) if not filename: return web.Response(status=400) # validation for security: prevent accessing arbitrary path - if filename[0] == '/' or '..' in filename: + if filename[0] == "/" or ".." 
in filename: return web.Response(status=400) if output_dir is None: @@ -447,8 +519,15 @@ class PromptServer(): return web.Response(status=400) if original_ref.get("subfolder", "") != "": - full_output_dir = os.path.join(output_dir, original_ref["subfolder"]) - if os.path.commonpath((os.path.abspath(full_output_dir), output_dir)) != output_dir: + full_output_dir = os.path.join( + output_dir, original_ref["subfolder"] + ) + if ( + os.path.commonpath( + (os.path.abspath(full_output_dir), output_dir) + ) + != output_dir + ): return web.Response(status=403) output_dir = full_output_dir @@ -457,14 +536,14 @@ class PromptServer(): if os.path.isfile(file): with Image.open(file) as original_pil: metadata = PngInfo() - if hasattr(original_pil,'text'): + if hasattr(original_pil, "text"): for key in original_pil.text: metadata.add_text(key, original_pil.text[key]) - original_pil = original_pil.convert('RGBA') - mask_pil = Image.open(image.file).convert('RGBA') + original_pil = original_pil.convert("RGBA") + mask_pil = Image.open(image.file).convert("RGBA") # alpha copy - new_alpha = mask_pil.getchannel('A') + new_alpha = mask_pil.getchannel("A") original_pil.putalpha(new_alpha) original_pil.save(filepath, compress_level=4, pnginfo=metadata) @@ -480,7 +559,7 @@ class PromptServer(): return web.Response(status=400) # validation for security: prevent accessing arbitrary path - if filename[0] == '/' or '..' in filename: + if filename[0] == "/" or ".." in filename: return web.Response(status=400) if output_dir is None: @@ -491,8 +570,15 @@ class PromptServer(): return web.Response(status=400) if "subfolder" in request.rel_url.query: - full_output_dir = os.path.join(output_dir, request.rel_url.query["subfolder"]) - if os.path.commonpath((os.path.abspath(full_output_dir), output_dir)) != output_dir: + full_output_dir = os.path.join( + output_dir, request.rel_url.query["subfolder"] + ) + if ( + os.path.commonpath( + (os.path.abspath(full_output_dir), output_dir) + ) + != output_dir + ): return web.Response(status=403) output_dir = full_output_dir @@ -500,76 +586,106 @@ class PromptServer(): file = os.path.join(output_dir, filename) if os.path.isfile(file): - if 'preview' in request.rel_url.query: + if "preview" in request.rel_url.query: with Image.open(file) as img: - preview_info = request.rel_url.query['preview'].split(';') + preview_info = request.rel_url.query["preview"].split(";") image_format = preview_info[0] - if image_format not in ['webp', 'jpeg'] or 'a' in request.rel_url.query.get('channel', ''): - image_format = 'webp' + if image_format not in [ + "webp", + "jpeg", + ] or "a" in request.rel_url.query.get("channel", ""): + image_format = "webp" quality = 90 if preview_info[-1].isdigit(): quality = int(preview_info[-1]) buffer = BytesIO() - if image_format in ['jpeg'] or request.rel_url.query.get('channel', '') == 'rgb': + if ( + image_format in ["jpeg"] + or request.rel_url.query.get("channel", "") == "rgb" + ): img = img.convert("RGB") img.save(buffer, format=image_format, quality=quality) buffer.seek(0) - return web.Response(body=buffer.read(), content_type=f'image/{image_format}', - headers={"Content-Disposition": f"filename=\"{filename}\""}) + return web.Response( + body=buffer.read(), + content_type=f"image/{image_format}", + headers={ + "Content-Disposition": f'filename="{filename}"' + }, + ) - if 'channel' not in request.rel_url.query: - channel = 'rgba' + if "channel" not in request.rel_url.query: + channel = "rgba" else: channel = request.rel_url.query["channel"] - if channel == 'rgb': + 
if channel == "rgb": with Image.open(file) as img: if img.mode == "RGBA": r, g, b, a = img.split() - new_img = Image.merge('RGB', (r, g, b)) + new_img = Image.merge("RGB", (r, g, b)) else: new_img = img.convert("RGB") buffer = BytesIO() - new_img.save(buffer, format='PNG') + new_img.save(buffer, format="PNG") buffer.seek(0) - return web.Response(body=buffer.read(), content_type='image/png', - headers={"Content-Disposition": f"filename=\"{filename}\""}) + return web.Response( + body=buffer.read(), + content_type="image/png", + headers={ + "Content-Disposition": f'filename="{filename}"' + }, + ) - elif channel == 'a': + elif channel == "a": with Image.open(file) as img: if img.mode == "RGBA": _, _, _, a = img.split() else: - a = Image.new('L', img.size, 255) + a = Image.new("L", img.size, 255) # alpha img - alpha_img = Image.new('RGBA', img.size) + alpha_img = Image.new("RGBA", img.size) alpha_img.putalpha(a) alpha_buffer = BytesIO() - alpha_img.save(alpha_buffer, format='PNG') + alpha_img.save(alpha_buffer, format="PNG") alpha_buffer.seek(0) - return web.Response(body=alpha_buffer.read(), content_type='image/png', - headers={"Content-Disposition": f"filename=\"{filename}\""}) + return web.Response( + body=alpha_buffer.read(), + content_type="image/png", + headers={ + "Content-Disposition": f'filename="{filename}"' + }, + ) else: # Get content type from mimetype, defaulting to 'application/octet-stream' - content_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream' + content_type = ( + mimetypes.guess_type(filename)[0] + or "application/octet-stream" + ) # For security, force certain mimetypes to download instead of display - if content_type in {'text/html', 'text/html-sandboxed', 'application/xhtml+xml', 'text/javascript', 'text/css'}: - content_type = 'application/octet-stream' # Forces download + if content_type in { + "text/html", + "text/html-sandboxed", + "application/xhtml+xml", + "text/javascript", + "text/css", + }: + content_type = "application/octet-stream" # Forces download return web.FileResponse( file, headers={ - "Content-Disposition": f"filename=\"{filename}\"", - "Content-Type": content_type - } + "Content-Disposition": f'filename="{filename}"', + "Content-Type": content_type, + }, ) return web.Response(status=404) @@ -589,7 +705,7 @@ class PromptServer(): safetensors_path = folder_paths.get_full_path(folder_name, filename) if safetensors_path is None: return web.Response(status=404) - out = comfy.utils.safetensors_header(safetensors_path, max_size=1024*1024) + out = comfy.utils.safetensors_header(safetensors_path, max_size=1024 * 1024) if out is None: return web.Response(status=404) dt = json.loads(out) @@ -604,11 +720,19 @@ class PromptServer(): cpu_device = comfy.model_management.torch.device("cpu") ram_total = comfy.model_management.get_total_memory(cpu_device) ram_free = comfy.model_management.get_free_memory(cpu_device) - vram_total, torch_vram_total = comfy.model_management.get_total_memory(device, torch_total_too=True) - vram_free, torch_vram_free = comfy.model_management.get_free_memory(device, torch_free_too=True) + vram_total, torch_vram_total = comfy.model_management.get_total_memory( + device, torch_total_too=True + ) + vram_free, torch_vram_free = comfy.model_management.get_free_memory( + device, torch_free_too=True + ) required_frontend_version = FrontendManager.get_required_frontend_version() - installed_templates_version = FrontendManager.get_installed_templates_version() - required_templates_version = 
FrontendManager.get_required_templates_version() + installed_templates_version = ( + FrontendManager.get_installed_templates_version() + ) + required_templates_version = ( + FrontendManager.get_required_templates_version() + ) system_stats = { "system": { @@ -621,8 +745,11 @@ class PromptServer(): "required_templates_version": required_templates_version, "python_version": sys.version, "pytorch_version": comfy.model_management.torch_version, - "embedded_python": os.path.split(os.path.split(sys.executable)[0])[1] == "python_embeded", - "argv": sys.argv + "embedded_python": os.path.split(os.path.split(sys.executable)[0])[ + 1 + ] + == "python_embeded", + "argv": sys.argv, }, "devices": [ { @@ -634,7 +761,7 @@ class PromptServer(): "torch_vram_total": torch_vram_total, "torch_vram_free": torch_vram_free, } - ] + ], } return web.json_response(system_stats) @@ -651,34 +778,53 @@ class PromptServer(): if issubclass(obj_class, _ComfyNodeInternal): return obj_class.GET_NODE_INFO_V1() info = {} - info['input'] = obj_class.INPUT_TYPES() - info['input_order'] = {key: list(value.keys()) for (key, value) in obj_class.INPUT_TYPES().items()} - info['output'] = obj_class.RETURN_TYPES - info['output_is_list'] = obj_class.OUTPUT_IS_LIST if hasattr(obj_class, 'OUTPUT_IS_LIST') else [False] * len(obj_class.RETURN_TYPES) - info['output_name'] = obj_class.RETURN_NAMES if hasattr(obj_class, 'RETURN_NAMES') else info['output'] - info['name'] = node_class - info['display_name'] = nodes.NODE_DISPLAY_NAME_MAPPINGS[node_class] if node_class in nodes.NODE_DISPLAY_NAME_MAPPINGS.keys() else node_class - info['description'] = obj_class.DESCRIPTION if hasattr(obj_class,'DESCRIPTION') else '' - info['python_module'] = getattr(obj_class, "RELATIVE_PYTHON_MODULE", "nodes") - info['category'] = 'sd' - if hasattr(obj_class, 'OUTPUT_NODE') and obj_class.OUTPUT_NODE == True: - info['output_node'] = True + info["input"] = obj_class.INPUT_TYPES() + info["input_order"] = { + key: list(value.keys()) + for (key, value) in obj_class.INPUT_TYPES().items() + } + info["output"] = obj_class.RETURN_TYPES + info["output_is_list"] = ( + obj_class.OUTPUT_IS_LIST + if hasattr(obj_class, "OUTPUT_IS_LIST") + else [False] * len(obj_class.RETURN_TYPES) + ) + info["output_name"] = ( + obj_class.RETURN_NAMES + if hasattr(obj_class, "RETURN_NAMES") + else info["output"] + ) + info["name"] = node_class + info["display_name"] = ( + nodes.NODE_DISPLAY_NAME_MAPPINGS[node_class] + if node_class in nodes.NODE_DISPLAY_NAME_MAPPINGS.keys() + else node_class + ) + info["description"] = ( + obj_class.DESCRIPTION if hasattr(obj_class, "DESCRIPTION") else "" + ) + info["python_module"] = getattr( + obj_class, "RELATIVE_PYTHON_MODULE", "nodes" + ) + info["category"] = "sd" + if hasattr(obj_class, "OUTPUT_NODE") and obj_class.OUTPUT_NODE == True: + info["output_node"] = True else: - info['output_node'] = False + info["output_node"] = False - if hasattr(obj_class, 'CATEGORY'): - info['category'] = obj_class.CATEGORY + if hasattr(obj_class, "CATEGORY"): + info["category"] = obj_class.CATEGORY - if hasattr(obj_class, 'OUTPUT_TOOLTIPS'): - info['output_tooltips'] = obj_class.OUTPUT_TOOLTIPS + if hasattr(obj_class, "OUTPUT_TOOLTIPS"): + info["output_tooltips"] = obj_class.OUTPUT_TOOLTIPS if getattr(obj_class, "DEPRECATED", False): - info['deprecated'] = True + info["deprecated"] = True if getattr(obj_class, "EXPERIMENTAL", False): - info['experimental'] = True + info["experimental"] = True - if hasattr(obj_class, 'API_NODE'): - info['api_node'] = obj_class.API_NODE + if 
hasattr(obj_class, "API_NODE"): + info["api_node"] = obj_class.API_NODE return info @routes.get("/object_info") @@ -689,7 +835,10 @@ class PromptServer(): try: out[x] = node_info(x) except Exception: - logging.error("[ERROR] An error occurred while retrieving information for the '%s' node.", x) + logging.error( + "[ERROR] An error occurred while retrieving information for the '%s' node.", + x, + ) logging.error(traceback.format_exc()) return web.json_response(out) @@ -715,60 +864,60 @@ class PromptServer(): """ query = request.rel_url.query - status_param = query.get('status') - workflow_id = query.get('workflow_id') - sort_by = query.get('sort_by', 'created_at').lower() - sort_order = query.get('sort_order', 'desc').lower() + status_param = query.get("status") + workflow_id = query.get("workflow_id") + sort_by = query.get("sort_by", "created_at").lower() + sort_order = query.get("sort_order", "desc").lower() status_filter = None if status_param: - status_filter = [s.strip().lower() for s in status_param.split(',') if s.strip()] + status_filter = [ + s.strip().lower() for s in status_param.split(",") if s.strip() + ] invalid_statuses = [s for s in status_filter if s not in JobStatus.ALL] if invalid_statuses: return web.json_response( - {"error": f"Invalid status value(s): {', '.join(invalid_statuses)}. Valid values: {', '.join(JobStatus.ALL)}"}, - status=400 + { + "error": f"Invalid status value(s): {', '.join(invalid_statuses)}. Valid values: {', '.join(JobStatus.ALL)}" + }, + status=400, ) - if sort_by not in {'created_at', 'execution_duration'}: + if sort_by not in {"created_at", "execution_duration"}: return web.json_response( {"error": "sort_by must be 'created_at' or 'execution_duration'"}, - status=400 + status=400, ) - if sort_order not in {'asc', 'desc'}: + if sort_order not in {"asc", "desc"}: return web.json_response( - {"error": "sort_order must be 'asc' or 'desc'"}, - status=400 + {"error": "sort_order must be 'asc' or 'desc'"}, status=400 ) limit = None # If limit is provided, validate that it is a positive integer, else continue without a limit - if 'limit' in query: + if "limit" in query: try: - limit = int(query.get('limit')) + limit = int(query.get("limit")) if limit <= 0: return web.json_response( - {"error": "limit must be a positive integer"}, - status=400 + {"error": "limit must be a positive integer"}, status=400 ) except (ValueError, TypeError): return web.json_response( - {"error": "limit must be an integer"}, - status=400 + {"error": "limit must be an integer"}, status=400 ) offset = 0 - if 'offset' in query: + if "offset" in query: try: - offset = int(query.get('offset')) + offset = int(query.get("offset")) if offset < 0: offset = 0 except (ValueError, TypeError): return web.json_response( - {"error": "offset must be an integer"}, - status=400 + {"error": "offset must be an integer"}, status=400 ) running, queued = self.prompt_queue.get_current_queue_volatile() @@ -778,36 +927,37 @@ class PromptServer(): queued = _remove_sensitive_from_queue(queued) jobs, total = get_all_jobs( - running, queued, history, + running, + queued, + history, status_filter=status_filter, workflow_id=workflow_id, sort_by=sort_by, sort_order=sort_order, limit=limit, - offset=offset + offset=offset, ) has_more = (offset + len(jobs)) < total - return web.json_response({ - 'jobs': jobs, - 'pagination': { - 'offset': offset, - 'limit': limit, - 'total': total, - 'has_more': has_more + return web.json_response( + { + "jobs": jobs, + "pagination": { + "offset": offset, + "limit": limit, + 
"total": total, + "has_more": has_more, + }, } - }) + ) @routes.get("/api/jobs/{job_id}") async def get_job_by_id(request): """Get a single job by ID.""" job_id = request.match_info.get("job_id", None) if not job_id: - return web.json_response( - {"error": "job_id is required"}, - status=400 - ) + return web.json_response({"error": "job_id is required"}, status=400) running, queued = self.prompt_queue.get_current_queue_volatile() history = self.prompt_queue.get_history(prompt_id=job_id) @@ -817,10 +967,7 @@ class PromptServer(): job = get_job(job_id, running, queued, history) if job is None: - return web.json_response( - {"error": "Job not found"}, - status=404 - ) + return web.json_response({"error": "Job not found"}, status=404) return web.json_response(job) @@ -836,7 +983,9 @@ class PromptServer(): else: offset = -1 - return web.json_response(self.prompt_queue.get_history(max_items=max_items, offset=offset)) + return web.json_response( + self.prompt_queue.get_history(max_items=max_items, offset=offset) + ) @routes.get("/history/{prompt_id}") async def get_history_prompt_id(request): @@ -847,22 +996,22 @@ class PromptServer(): async def get_queue(request): queue_info = {} current_queue = self.prompt_queue.get_current_queue_volatile() - queue_info['queue_running'] = _remove_sensitive_from_queue(current_queue[0]) - queue_info['queue_pending'] = _remove_sensitive_from_queue(current_queue[1]) + queue_info["queue_running"] = _remove_sensitive_from_queue(current_queue[0]) + queue_info["queue_pending"] = _remove_sensitive_from_queue(current_queue[1]) return web.json_response(queue_info) @routes.post("/prompt") async def post_prompt(request): logging.info("got prompt") - json_data = await request.json() + json_data = await request.json() json_data = self.trigger_on_prompt(json_data) if "number" in json_data: - number = float(json_data['number']) + number = float(json_data["number"]) else: number = self.number if "front" in json_data: - if json_data['front']: + if json_data["front"]: number = -number self.number += 1 @@ -875,7 +1024,9 @@ class PromptServer(): if "partial_execution_targets" in json_data: partial_execution_targets = json_data["partial_execution_targets"] - valid = await execution.validate_prompt(prompt_id, prompt, partial_execution_targets) + valid = await execution.validate_prompt( + prompt_id, prompt, partial_execution_targets + ) extra_data = {} if "extra_data" in json_data: extra_data = json_data["extra_data"] @@ -888,30 +1039,49 @@ class PromptServer(): for sensitive_val in execution.SENSITIVE_EXTRA_DATA_KEYS: if sensitive_val in extra_data: sensitive[sensitive_val] = extra_data.pop(sensitive_val) - extra_data["create_time"] = int(time.time() * 1000) # timestamp in milliseconds - self.prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute, sensitive)) - response = {"prompt_id": prompt_id, "number": number, "node_errors": valid[3]} + extra_data["create_time"] = int( + time.time() * 1000 + ) # timestamp in milliseconds + self.prompt_queue.put( + ( + number, + prompt_id, + prompt, + extra_data, + outputs_to_execute, + sensitive, + ) + ) + response = { + "prompt_id": prompt_id, + "number": number, + "node_errors": valid[3], + } return web.json_response(response) else: - logging.warning("invalid prompt: {}".format(valid[1])) - return web.json_response({"error": valid[1], "node_errors": valid[3]}, status=400) + logging.warning("invalid prompt: %s", valid[1]) + return web.json_response( + {"error": valid[1], "node_errors": valid[3]}, status=400 + ) else: error = 
{ "type": "no_prompt", "message": "No prompt provided", "details": "No prompt provided", - "extra_info": {} + "extra_info": {}, } - return web.json_response({"error": error, "node_errors": {}}, status=400) + return web.json_response( + {"error": error, "node_errors": {}}, status=400 + ) @routes.post("/queue") async def post_queue(request): - json_data = await request.json() + json_data = await request.json() if "clear" in json_data: if json_data["clear"]: self.prompt_queue.wipe_queue() if "delete" in json_data: - to_delete = json_data['delete'] + to_delete = json_data["delete"] for id_to_delete in to_delete: delete_func = lambda a: a[1] == id_to_delete self.prompt_queue.delete_queue_item(delete_func) @@ -926,7 +1096,7 @@ class PromptServer(): json_data = {} # Check if a specific prompt_id was provided for targeted interruption - prompt_id = json_data.get('prompt_id') + prompt_id = json_data.get("prompt_id") if prompt_id: currently_running, _ = self.prompt_queue.get_current_queue() @@ -942,7 +1112,10 @@ class PromptServer(): if should_interrupt: nodes.interrupt_processing() else: - logging.info("Prompt %s is not currently running, skipping interrupt", prompt_id) + logging.info( + "Prompt %s is not currently running, skipping interrupt", + prompt_id, + ) else: # No prompt_id provided, do a global interrupt logging.info("Global interrupt (no prompt_id specified)") @@ -963,27 +1136,29 @@ class PromptServer(): @routes.post("/history") async def post_history(request): - json_data = await request.json() + json_data = await request.json() if "clear" in json_data: if json_data["clear"]: self.prompt_queue.wipe_history() if "delete" in json_data: - to_delete = json_data['delete'] + to_delete = json_data["delete"] for id_to_delete in to_delete: self.prompt_queue.delete_history_item(id_to_delete) return web.Response(status=200) async def setup(self): - timeout = aiohttp.ClientTimeout(total=None) # no timeout + timeout = aiohttp.ClientTimeout(total=None) # no timeout self.client_session = aiohttp.ClientSession(timeout=timeout) def add_routes(self): self.user_manager.add_routes(self.routes) self.model_file_manager.add_routes(self.routes) - self.custom_node_manager.add_routes(self.routes, self.app, nodes.LOADED_MODULE_DIRS.items()) + self.custom_node_manager.add_routes( + self.routes, self.app, nodes.LOADED_MODULE_DIRS.items() + ) self.subgraph_manager.add_routes(self.routes, nodes.LOADED_MODULE_DIRS.items()) - self.app.add_subapp('/internal', self.internal_routes.get_app()) + self.app.add_subapp("/internal", self.internal_routes.get_app()) # Prefix every route with /api for easier matching for delegation. # This is very useful for frontend dev server, which need to forward @@ -995,22 +1170,23 @@ class PromptServer(): # Custom nodes might add extra static routes. Only process non-static # routes to add /api prefix. if isinstance(route, web.RouteDef): - api_routes.route(route.method, "/api" + route.path)(route.handler, **route.kwargs) + api_routes.route(route.method, "/api" + route.path)( + route.handler, **route.kwargs + ) self.app.add_routes(api_routes) self.app.add_routes(self.routes) # Add routes from web extensions. 
for name, dir in nodes.EXTENSION_WEB_DIRS.items(): - self.app.add_routes([web.static('/extensions/' + name, dir)]) + self.app.add_routes([web.static("/extensions/" + name, dir)]) installed_templates_version = FrontendManager.get_installed_templates_version() use_legacy_templates = True if installed_templates_version: try: - use_legacy_templates = ( - parse_version(installed_templates_version) - < parse_version("0.3.0") - ) + use_legacy_templates = parse_version( + installed_templates_version + ) < parse_version("0.3.0") except Exception as exc: logging.warning( "Unable to parse templates version '%s': %s", @@ -1021,9 +1197,7 @@ class PromptServer(): if use_legacy_templates: workflow_templates_path = FrontendManager.legacy_templates_path() if workflow_templates_path: - self.app.add_routes([ - web.static('/templates', workflow_templates_path) - ]) + self.app.add_routes([web.static("/templates", workflow_templates_path)]) else: handler = FrontendManager.template_asset_handler() if handler: @@ -1032,19 +1206,19 @@ class PromptServer(): # Serve embedded documentation from the package embedded_docs_path = FrontendManager.embedded_docs_path() if embedded_docs_path: - self.app.add_routes([ - web.static('/docs', embedded_docs_path) - ]) + self.app.add_routes([web.static("/docs", embedded_docs_path)]) - self.app.add_routes([ - web.static('/', self.web_root), - ]) + self.app.add_routes( + [ + web.static("/", self.web_root), + ] + ) def get_queue_info(self): prompt_info = {} exec_info = {} - exec_info['queue_remaining'] = self.prompt_queue.get_tasks_remaining() - prompt_info['exec_info'] = exec_info + exec_info["queue_remaining"] = self.prompt_queue.get_tasks_remaining() + prompt_info["exec_info"] = exec_info return prompt_info async def send(self, event, data, sid=None): @@ -1073,7 +1247,7 @@ class PromptServer(): image = image_data[1] max_size = image_data[2] if max_size is not None: - if hasattr(Image, 'Resampling'): + if hasattr(Image, "Resampling"): resampling = Image.Resampling.BILINEAR else: resampling = Image.Resampling.LANCZOS @@ -1097,7 +1271,7 @@ class PromptServer(): image = image_data[1] max_size = image_data[2] if max_size is not None: - if hasattr(Image, 'Resampling'): + if hasattr(Image, "Resampling"): resampling = Image.Resampling.BILINEAR else: resampling = Image.Resampling.LANCZOS @@ -1113,7 +1287,8 @@ class PromptServer(): # Serialize metadata as JSON import json - metadata_json = json.dumps(metadata).encode('utf-8') + + metadata_json = json.dumps(metadata).encode("utf-8") metadata_length = len(metadata_json) # Prepare image data @@ -1127,7 +1302,9 @@ class PromptServer(): combined_data.extend(metadata_json) combined_data.extend(image_bytes) - await self.send_bytes(BinaryEventTypes.PREVIEW_IMAGE_WITH_METADATA, combined_data, sid=sid) + await self.send_bytes( + BinaryEventTypes.PREVIEW_IMAGE_WITH_METADATA, combined_data, sid=sid + ) async def send_bytes(self, event, data, sid=None): message = self.encode_bytes(event, data) @@ -1150,11 +1327,10 @@ class PromptServer(): await send_socket_catch_exception(self.sockets[sid].send_json, message) def send_sync(self, event, data, sid=None): - self.loop.call_soon_threadsafe( - self.messages.put_nowait, (event, data, sid)) + self.loop.call_soon_threadsafe(self.messages.put_nowait, (event, data, sid)) def queue_updated(self): - self.send_sync("status", { "status": self.get_queue_info() }) + self.send_sync("status", {"status": self.get_queue_info()}) async def publish_loop(self): while True: @@ -1170,9 +1346,12 @@ class PromptServer(): ssl_ctx = 
None scheme = "http" if args.tls_keyfile and args.tls_certfile: - ssl_ctx = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_SERVER, verify_mode=ssl.CERT_NONE) - ssl_ctx.load_cert_chain(certfile=args.tls_certfile, - keyfile=args.tls_keyfile) + ssl_ctx = ssl.SSLContext( + protocol=ssl.PROTOCOL_TLS_SERVER, verify_mode=ssl.CERT_NONE + ) + ssl_ctx.load_cert_chain( + certfile=args.tls_certfile, keyfile=args.tls_keyfile + ) scheme = "https" if verbose: @@ -1183,17 +1362,19 @@ site = web.TCPSite(runner, address, port, ssl_context=ssl_ctx) await site.start() - if not hasattr(self, 'address'): - self.address = address #TODO: remove this + if not hasattr(self, "address"): + self.address = address # TODO: remove this self.port = port - if ':' in address: + if ":" in address: address_print = "[{}]".format(address) else: address_print = address if verbose: - logging.info("To see the GUI go to: {}://{}:{}".format(scheme, address_print, port)) + logging.info( + "To see the GUI go to: %s://%s:%s", scheme, address_print, port + ) if call_on_start is not None: call_on_start(scheme, self.address, self.port) @@ -1206,7 +1387,9 @@ try: json_data = handler(json_data) except Exception: - logging.warning("[ERROR] An error occurred during the on_prompt_handler processing") + logging.warning( + "An error occurred while processing an on_prompt handler" + ) logging.warning(traceback.format_exc()) return json_data diff --git a/utils/extra_config.py b/utils/extra_config.py index a0fcda9e8..3e7caaba0 100644 --- a/utils/extra_config.py +++ b/utils/extra_config.py @@ -30,5 +30,5 @@ def load_extra_path_config(yaml_path): elif not os.path.isabs(full_path): full_path = os.path.abspath(os.path.join(yaml_dir, y)) normalized_path = os.path.normpath(full_path) - logging.info("Adding extra search path {} {}".format(x, normalized_path)) + logging.info("Adding extra search path %s: %s", x, normalized_path) folder_paths.add_model_folder_path(x, normalized_path, is_default)